import pandas as pd
import numpy as np
import os
from glob import glob
from multiprocessing import cpu_count, Pool
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import prnu
from multiprocessing import Pool, cpu_count
import numpy as np
import pywt
from numpy.fft import fft2, ifft2
from scipy.ndimage import filters
from sklearn.metrics import roc_curve, auc
from tqdm import tqdm
import tensorflow as tf
import os
import pickle
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import config
tf.compat.v1.Session()
dnnlib.tflib.init_tf()
WARNING:tensorflow:From C:\Users\user\Untitled Folder\prnu-python\stylegan\dnnlib\tflib\tfutil.py:109: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead. WARNING:tensorflow:From C:\Users\user\Untitled Folder\prnu-python\stylegan\dnnlib\tflib\tfutil.py:132: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.
#To generate images set your directory to stylegan folder
#pre traıned model
import pandas as pd
_G,_D,Gs = pd.read_pickle(r'karras2019stylegan-ffhq-1024x1024.pkl')
# _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run.
# _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run.
# Gs = Long-term average of the generator. Yields higher-quality results than the instantaneous snapshot.
WARNING:tensorflow:From C:\Users\user\Untitled Folder\prnu-python\stylegan\dnnlib\tflib\network.py:142: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead. WARNING:tensorflow:From C:\Users\user\Untitled Folder\prnu-python\stylegan\dnnlib\tflib\network.py:150: The name tf.AUTO_REUSE is deprecated. Please use tf.compat.v1.AUTO_REUSE instead. WARNING:tensorflow:From C:\Users\user\Untitled Folder\prnu-python\stylegan\dnnlib\tflib\tfutil.py:76: The name tf.VariableScope is deprecated. Please use tf.compat.v1.VariableScope instead. WARNING:tensorflow:From C:\Users\user\Untitled Folder\prnu-python\stylegan\dnnlib\tflib\network.py:151: The name tf.get_variable_scope is deprecated. Please use tf.compat.v1.get_variable_scope instead. WARNING:tensorflow:From C:\Users\user\Untitled Folder\prnu-python\stylegan\dnnlib\tflib\network.py:154: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead. WARNING:tensorflow:From C:\Users\user\Untitled Folder\prnu-python\stylegan\dnnlib\tflib\network.py:182: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead. WARNING:tensorflow:From C:\Users\user\Untitled Folder\prnu-python\stylegan\dnnlib\tflib\tfutil.py:200: The name tf.assign is deprecated. Please use tf.compat.v1.assign instead. WARNING:tensorflow:From <string>:364: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.where in 2.0, which has the same broadcast rule as np.where
Gs.print_layers()
Gs Params OutputShape WeightShape --- --- --- --- latents_in - (?, 512) - labels_in - (?, 0) - lod - () - dlatent_avg - (512,) - G_mapping/latents_in - (?, 512) - G_mapping/labels_in - (?, 0) - G_mapping/PixelNorm - (?, 512) - G_mapping/Dense0 262656 (?, 512) (512, 512) G_mapping/Dense1 262656 (?, 512) (512, 512) G_mapping/Dense2 262656 (?, 512) (512, 512) G_mapping/Dense3 262656 (?, 512) (512, 512) G_mapping/Dense4 262656 (?, 512) (512, 512) G_mapping/Dense5 262656 (?, 512) (512, 512) G_mapping/Dense6 262656 (?, 512) (512, 512) G_mapping/Dense7 262656 (?, 512) (512, 512) G_mapping/Broadcast - (?, 18, 512) - G_mapping/dlatents_out - (?, 18, 512) - Truncation - (?, 18, 512) - G_synthesis/dlatents_in - (?, 18, 512) - G_synthesis/4x4/Const 534528 (?, 512, 4, 4) (512,) G_synthesis/4x4/Conv 2885632 (?, 512, 4, 4) (3, 3, 512, 512) G_synthesis/ToRGB_lod8 1539 (?, 3, 4, 4) (1, 1, 512, 3) G_synthesis/8x8/Conv0_up 2885632 (?, 512, 8, 8) (3, 3, 512, 512) G_synthesis/8x8/Conv1 2885632 (?, 512, 8, 8) (3, 3, 512, 512) G_synthesis/ToRGB_lod7 1539 (?, 3, 8, 8) (1, 1, 512, 3) G_synthesis/Upscale2D - (?, 3, 8, 8) - G_synthesis/Grow_lod7 - (?, 3, 8, 8) - G_synthesis/16x16/Conv0_up 2885632 (?, 512, 16, 16) (3, 3, 512, 512) G_synthesis/16x16/Conv1 2885632 (?, 512, 16, 16) (3, 3, 512, 512) G_synthesis/ToRGB_lod6 1539 (?, 3, 16, 16) (1, 1, 512, 3) G_synthesis/Upscale2D_1 - (?, 3, 16, 16) - G_synthesis/Grow_lod6 - (?, 3, 16, 16) - G_synthesis/32x32/Conv0_up 2885632 (?, 512, 32, 32) (3, 3, 512, 512) G_synthesis/32x32/Conv1 2885632 (?, 512, 32, 32) (3, 3, 512, 512) G_synthesis/ToRGB_lod5 1539 (?, 3, 32, 32) (1, 1, 512, 3) G_synthesis/Upscale2D_2 - (?, 3, 32, 32) - G_synthesis/Grow_lod5 - (?, 3, 32, 32) - G_synthesis/64x64/Conv0_up 1442816 (?, 256, 64, 64) (3, 3, 512, 256) G_synthesis/64x64/Conv1 852992 (?, 256, 64, 64) (3, 3, 256, 256) G_synthesis/ToRGB_lod4 771 (?, 3, 64, 64) (1, 1, 256, 3) G_synthesis/Upscale2D_3 - (?, 3, 64, 64) - G_synthesis/Grow_lod4 - (?, 3, 64, 64) - G_synthesis/128x128/Conv0_up 426496 (?, 128, 128, 128) (3, 3, 256, 128) G_synthesis/128x128/Conv1 279040 (?, 128, 128, 128) (3, 3, 128, 128) G_synthesis/ToRGB_lod3 387 (?, 3, 128, 128) (1, 1, 128, 3) G_synthesis/Upscale2D_4 - (?, 3, 128, 128) - G_synthesis/Grow_lod3 - (?, 3, 128, 128) - G_synthesis/256x256/Conv0_up 139520 (?, 64, 256, 256) (3, 3, 128, 64) G_synthesis/256x256/Conv1 102656 (?, 64, 256, 256) (3, 3, 64, 64) G_synthesis/ToRGB_lod2 195 (?, 3, 256, 256) (1, 1, 64, 3) G_synthesis/Upscale2D_5 - (?, 3, 256, 256) - G_synthesis/Grow_lod2 - (?, 3, 256, 256) - G_synthesis/512x512/Conv0_up 51328 (?, 32, 512, 512) (3, 3, 64, 32) G_synthesis/512x512/Conv1 42112 (?, 32, 512, 512) (3, 3, 32, 32) G_synthesis/ToRGB_lod1 99 (?, 3, 512, 512) (1, 1, 32, 3) G_synthesis/Upscale2D_6 - (?, 3, 512, 512) - G_synthesis/Grow_lod1 - (?, 3, 512, 512) - G_synthesis/1024x1024/Conv0_up 21056 (?, 16, 1024, 1024) (3, 3, 32, 16) G_synthesis/1024x1024/Conv1 18752 (?, 16, 1024, 1024) (3, 3, 16, 16) G_synthesis/ToRGB_lod0 51 (?, 3, 1024, 1024) (1, 1, 16, 3) G_synthesis/Upscale2D_7 - (?, 3, 1024, 1024) - G_synthesis/Grow_lod0 - (?, 3, 1024, 1024) - G_synthesis/images_out - (?, 3, 1024, 1024) - G_synthesis/lod - () - G_synthesis/noise0 - (1, 1, 4, 4) - G_synthesis/noise1 - (1, 1, 4, 4) - G_synthesis/noise2 - (1, 1, 8, 8) - G_synthesis/noise3 - (1, 1, 8, 8) - G_synthesis/noise4 - (1, 1, 16, 16) - G_synthesis/noise5 - (1, 1, 16, 16) - G_synthesis/noise6 - (1, 1, 32, 32) - G_synthesis/noise7 - (1, 1, 32, 32) - G_synthesis/noise8 - (1, 1, 64, 64) - G_synthesis/noise9 - (1, 1, 64, 64) - G_synthesis/noise10 - (1, 1, 128, 128) - G_synthesis/noise11 - (1, 1, 128, 128) - G_synthesis/noise12 - (1, 1, 256, 256) - G_synthesis/noise13 - (1, 1, 256, 256) - G_synthesis/noise14 - (1, 1, 512, 512) - G_synthesis/noise15 - (1, 1, 512, 512) - G_synthesis/noise16 - (1, 1, 1024, 1024) - G_synthesis/noise17 - (1, 1, 1024, 1024) - images_out - (?, 3, 1024, 1024) - --- --- --- --- Total 26219627
def make_image():
for i in range (500):
# Pick latent vector.
rnd = np.random.RandomState()
latents = rnd.randn(1, Gs.input_shape[1])
# Generate image.
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
images = Gs.run(latents, None, truncation_psi=1.0, randomize_noise=True, output_transform=fmt)
# Save image.
os.makedirs(config.result_dir, exist_ok=True)
png_filename = os.path.join(config.result_dir, 'example_'+str(i)+'.png'.format(1))
PIL.Image.fromarray(images[0], 'RGB').save(png_filename)
print(i)
make_image(500)
from IPython.display import Image
Image(filename='prnu.png')
"""ff_dirlist = GAN Images
nat_dirlist = Natural Face Images
"""
ff_dirlist = np.array(sorted(glob(r'test\data\results\*.PNG')))
ff_device = np.array([os.path.split(i)[1].rsplit('_', 1)[0] for i in ff_dirlist])
nat_dirlist = np.array(sorted(glob(r'test\data\archieve2\*.PNG')))[:500]
nat_device = np.array([os.path.split(i)[1].split('0', 1)[0] for i in nat_dirlist])[:500]
stylegan_psi05 = np.array(sorted(glob(r'test\data\stylegan_psi0.5\*.PNG')))
stylegan_psi05_device = np.array([os.path.split(i)[1].rsplit('_', 1)[0] for i in stylegan_psi05])
stylegan_psi1 = np.array(sorted(glob(r'test\data\stylegan_psi1.0\*.PNG')))
stylegan_psi1_device = np.array([os.path.split(i)[1].rsplit('_', 1)[0] for i in stylegan_psi1])
stylegan2_psi05 = np.array(sorted(glob(r'test\data\stylegan2_psi0.5\*.PNG')))
stylegan2_psi05_device = np.array([os.path.split(i)[1].split('0', 1)[0] for i in stylegan2_psi05])[:500]
stylegan2_psi05=stylegan2_psi05[:500]
stylegan2_psi1 = np.array(sorted(glob(r'test\data\stylegan2_psi1.0\*.PNG')))
stylegan2_psi1_device = np.array([os.path.split(i)[1].split('0',1 )[0] for i in stylegan2_psi1])[:500]
stylegan2_psi1=stylegan2_psi1[:500]
stylegan_psi05.shape,stylegan_psi1.shape,stylegan2_psi05.shape,stylegan2_psi1.shape
((500,), (500,), (500,), (500,))
def threshold(wlet_coeff_energy_avg: np.ndarray, noise_var: float) -> np.ndarray:
"""
Noise variance theshold as from Binghamton toolbox.
:param wlet_coeff_energy_avg:
:param noise_var:
:return: noise variance threshold
"""
res = wlet_coeff_energy_avg - noise_var
return (res + np.abs(res)) / 2
def wiener_adaptive(x: np.ndarray, noise_var: float, **kwargs) -> np.ndarray:
"""
WaveNoise as from Binghamton toolbox.
Wiener adaptive flter aimed at extracting the noise component
For each input pixel the average variance over a neighborhoods of different window sizes is first computed.
The smaller average variance is taken into account when filtering according to Wiener.
:param x: 2D matrix
:param noise_var: Power spectral density of the noise we wish to extract (S)
:param window_size_list: list of window sizes
:return: wiener filtered version of input x
"""
window_size_list = list(kwargs.pop('window_size_list', [3, 5, 7, 9]))
energy = x ** 2
avg_win_energy = np.zeros(x.shape + (len(window_size_list),))
for window_idx, window_size in enumerate(window_size_list):
avg_win_energy[:, :, window_idx] = filters.uniform_filter(energy,
window_size,
mode='constant')
coef_var = threshold(avg_win_energy, noise_var)
coef_var_min = np.min(coef_var, axis=2)
x = x * noise_var / (coef_var_min + noise_var)
return x
def pce(cc: np.ndarray, neigh_radius: int = 2) -> dict:
"""
PCE position and value
:param cc: as from crosscorr2d
:param neigh_radius: radius around the peak to be ignored while computing floor energy
:return: {'peak':(y,x), 'pce': peak to floor ratio, 'cc': cross-correlation value at peak position
"""
assert (cc.ndim == 2)
assert (isinstance(neigh_radius, int))
out = dict()
max_idx = np.argmax(cc.flatten())
max_y, max_x = np.unravel_index(max_idx, cc.shape)
peak_height = cc[max_y, max_x]
cc_nopeaks = cc.copy()
cc_nopeaks[max_y - neigh_radius:max_y + neigh_radius, max_x - neigh_radius:max_x + neigh_radius] = 0
pce_energy = np.mean(cc_nopeaks.flatten() ** 2)
out['peak'] = (max_y, max_x)
out['pce'] = (peak_height ** 2) / pce_energy * np.sign(peak_height)
out['cc'] = peak_height
return out
def noise_extract(im: np.ndarray, levels: int = 4, sigma: float = 5) -> np.ndarray:
"""
NoiseExtract as from Binghamton toolbox.
:param im: grayscale or color image, np.uint8
:param levels: number of wavelet decomposition levels
:param sigma: estimated noise power
:return: noise residual
"""
assert (im.dtype == np.uint8)
assert (im.ndim in [2, 3])
im = im.astype(np.float32)
noise_var = sigma ** 2
if im.ndim == 2:
im.shape += (1,)
W = np.zeros(im.shape, np.float32)
for ch in range(im.shape[2]):
wlet = None
while wlet is None and levels > 0:
try:
wlet = pywt.wavedec2(im[:, :, ch], 'db4', level=levels)
except ValueError:
levels -= 1
wlet = None
if wlet is None:
raise ValueError('Impossible to compute Wavelet filtering for input size: {}'.format(im.shape))
wlet_details = wlet[1:]
wlet_details_filter = [None] * len(wlet_details)
# Cycle over Wavelet levels 1:levels-1
for wlet_level_idx, wlet_level in enumerate(wlet_details):
# Cycle over H,V,D components
level_coeff_filt = [None] * 3
for wlet_coeff_idx, wlet_coeff in enumerate(wlet_level):
level_coeff_filt[wlet_coeff_idx] = wiener_adaptive(wlet_coeff, noise_var)
wlet_details_filter[wlet_level_idx] = tuple(level_coeff_filt)
# Set filtered detail coefficients for Levels > 0 ---
wlet[1:] = wlet_details_filter
# Set to 0 all Level 0 approximation coefficients ---
wlet[0][...] = 0
# Invert wavelet transform ---
wrec = pywt.waverec2(wlet, 'db4')
try:
W[:, :, ch] = wrec
except ValueError:
W = np.zeros(wrec.shape[:2] + (im.shape[2],), np.float32)
W[:, :, ch] = wrec
if W.shape[2] == 1:
W.shape = W.shape[:2]
W = W[:im.shape[0], :im.shape[1]]
return W
import cv2
im = cv2.imread(ff_dirlist[0])
print(type(im))
# <class 'numpy.ndarray'>
print(im.shape)
print(type(im.shape))
# (225, 400, 3)
# <class 'tuple'>
<class 'numpy.ndarray'> (1024, 1024, 3) <class 'tuple'>
from PIL import Image
basewidth = 300
img = Image.open(stylegan_psi05[0])
wpercent = (basewidth/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
img = img.resize((basewidth,hsize), Image.ANTIALIAS)
img
from PIL import Image
basewidth = 300
img_2 = Image.open(stylegan2_psi05[1])
wpercent = (basewidth/float(img_2.size[0]))
hsize = int((float(img_2.size[1])*float(wpercent)))
img_2 = img_2.resize((basewidth,hsize), Image.ANTIALIAS)
img_2
from PIL import Image
basewidth = 300
img_3 = Image.open(stylegan2_psi1[1])
img_4 = Image.open(stylegan_psi1[1])
img_5 = Image.open(nat_dirlist[1])
plt.subplot(151)
plt.imshow(img, cmap=plt.cm.gray, vmin=40, vmax=220)
plt.axis('off')
plt.title('StyleGAN_psi0.5', fontsize=10)
plt.subplot(152)
plt.imshow(img_2, cmap=plt.cm.gray, vmin=40, vmax=220)
plt.axis('off')
plt.title('StyleGAN2_psi0.5', fontsize=10)
plt.subplot(153)
plt.imshow(img_3, cmap=plt.cm.gray, vmin=40, vmax=220)
plt.axis('off')
plt.title('StyleGAN2_psi1', fontsize=10)
plt.subplot(154)
plt.imshow(img_4, cmap=plt.cm.gray, vmin=40, vmax=220)
plt.axis('off')
plt.title('StyleGAN_psi1', fontsize=10)
plt.subplot(155)
plt.imshow(img_5, cmap=plt.cm.gray, vmin=40, vmax=220)
plt.axis('off')
plt.title('Natural', fontsize=10)
plt.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9, bottom=0, left=0,
right=1)
plt.show()
def rgb2gray(im: np.ndarray) -> np.ndarray:
"""
RGB to gray as from Binghamton toolbox.
:param im: multidimensional array
:return: grayscale version of input im
"""
rgb2gray_vector = np.asarray([0.29893602, 0.58704307, 0.11402090]).astype(np.float32)
rgb2gray_vector.shape = (3, 1)
if im.ndim == 2:
im_gray = np.copy(im)
elif im.shape[2] == 1:
im_gray = np.copy(im[:, :, 0])
elif im.shape[2] == 3:
w, h = im.shape[:2]
im = np.reshape(im, (w * h, 3))
im_gray = np.dot(im, rgb2gray_vector)
im_gray.shape = (w, h)
else:
raise ValueError('Input image must have 1 or 3 channels')
return im_gray.astype(np.float32)
rgb2gray(np.array(img)).shape
(300, 300)
d=noise_extract(np.array(img))
Image.fromarray(d, 'RGB')
c=noise_extract(np.array(img_2))
Image.fromarray(c, 'RGB')
def wiener_dft(im: np.ndarray, sigma: float) -> np.ndarray:
"""
Adaptive Wiener filter applied to the 2D FFT of the image
:param im: multidimensional array
:param sigma: estimated noise power
:return: filtered version of input im
"""
noise_var = sigma ** 2
h, w = im.shape
im_noise_fft = fft2(im)
im_noise_fft_mag = np.abs(im_noise_fft / (h * w) ** .5)
im_noise_fft_mag_noise = wiener_adaptive(im_noise_fft_mag, noise_var)
zeros_y, zeros_x = np.nonzero(im_noise_fft_mag == 0)
im_noise_fft_mag[zeros_y, zeros_x] = 1
im_noise_fft_mag_noise[zeros_y, zeros_x] = 0
im_noise_fft_filt = im_noise_fft * im_noise_fft_mag_noise / im_noise_fft_mag
im_noise_filt = np.real(ifft2(im_noise_fft_filt))
return im_noise_filt.astype(np.float32)
def crosscorr_2d(k1: np.ndarray, k2: np.ndarray) -> np.ndarray:
"""
PRNU 2D cross-correlation
:param k1: 2D matrix of size (h1,w1)
:param k2: 2D matrix of size (h2,w2)
:return: 2D matrix of size (max(h1,h2),max(w1,w2))
"""
assert (k1.ndim == 2)
assert (k2.ndim == 2)
max_height = max(k1.shape[0], k2.shape[0])
max_width = max(k1.shape[1], k2.shape[1])
k1 -= k1.flatten().mean()
k2 -= k2.flatten().mean()
k1 = np.pad(k1, [(0, max_height - k1.shape[0]), (0, max_width - k1.shape[1])], mode='constant', constant_values=0)
k2 = np.pad(k2, [(0, max_height - k2.shape[0]), (0, max_width - k2.shape[1])], mode='constant', constant_values=0)
k1_fft = fft2(k1, )
k2_fft = fft2(np.rot90(k2, 2), )
return np.real(ifft2(k1_fft * k2_fft)).astype(np.float32)
def extract_multiple_aligned(imgs: list, levels: int = 4, sigma: float = 5, processes: int = None,
batch_size=cpu_count(), tqdm_str: str = '') -> np.ndarray:
"""
Extract PRNU from a list of images. Images are supposed to be the same size and properly oriented
:param tqdm_str: tqdm description (see tqdm documentation)
:param batch_size: number of parallel processed images
:param processes: number of parallel processes
:param imgs: list of images of size (H,W,Ch) and type np.uint8
:param levels: number of wavelet decomposition levels
:param sigma: estimated noise power
:return: PRNU
"""
assert (isinstance(imgs[0], np.ndarray))
assert (imgs[0].ndim == 3)
assert (imgs[0].dtype == np.uint8)
h, w, ch = imgs[0].shape
RPsum = np.zeros((h, w, ch), np.float32)
NN = np.zeros((h, w, ch), np.float32)
if processes is None or processes > 1:
args_list = []
for im in imgs:
args_list += [(im, levels, sigma)]
pool = Pool(processes=processes)
for batch_idx0 in tqdm(np.arange(start=0, step=batch_size, stop=len(imgs)), disable=tqdm_str == '',
desc=(tqdm_str + ' (1/2)'), dynamic_ncols=True):
nni = pool.map(inten_sat_compact, args_list[batch_idx0:batch_idx0 + batch_size])
for ni in nni:
NN += ni
del nni
for batch_idx0 in tqdm(np.arange(start=0, step=batch_size, stop=len(imgs)), disable=tqdm_str == '',
desc=(tqdm_str + ' (2/2)'), dynamic_ncols=True):
wi_list = pool.map(noise_extract_compact, args_list[batch_idx0:batch_idx0 + batch_size])
for wi in wi_list:
RPsum += wi
del wi_list
pool.close()
else: # Single process
for im in tqdm(imgs, disable=tqdm_str is None, desc=tqdm_str, dynamic_ncols=True):
RPsum += noise_extract_compact((im, levels, sigma))
NN += (inten_scale(im) * saturation(im)) ** 2
K = RPsum / (NN + 1)
K = rgb2gray(K)
K = zero_mean_total(K)
K = wiener_dft(K, K.std(ddof=1)).astype(np.float32)
return K
print('Computing fingerprints for StyleGAN psi=1.0')
fingerprint_stylegan_psi1 = sorted(np.unique(stylegan_psi1_device))
stg_ps1 = []
for device in fingerprint_stylegan_psi1:
imgs = []
for img_path in stylegan_psi1[stylegan_psi1_device == device]:
im = Image.open(img_path)
#im = im.resize((2000, 3008), Image.ANTIALIAS)
im_arr = np.asarray(im)
if im_arr.dtype != np.uint8:
print('Error while reading image: {}'.format(img_path))
continue
if im_arr.ndim != 3:
print('Image is not RGB: {}'.format(img_path))
continue
im_cut2 = prnu.cut_ctr(im_arr, (512, 512, 3))
imgs += [im_cut2]
stg_ps1 += [prnu.extract_multiple_aligned(imgs, processes=cpu_count())]
stg_ps1 = np.stack(stg_ps1, 0)
Computing fingerprints for StyleGAN psi=1.0
stg_ps1.shape
(1, 512, 512)
print('Computing fingerprints for StyleGAN2 psi=1.0')
fingerprint_stylegan2_psi1 = sorted(np.unique(stylegan2_psi1_device))
stg2_ps1 = []
for device in fingerprint_stylegan2_psi1:
imgs = []
for img_path in stylegan2_psi1[stylegan2_psi1_device == device]:
im = Image.open(img_path)
#im = im.resize((2000, 3008), Image.ANTIALIAS)
im_arr = np.asarray(im)
if im_arr.dtype != np.uint8:
print('Error while reading image: {}'.format(img_path))
continue
if im_arr.ndim != 3:
print('Image is not RGB: {}'.format(img_path))
continue
im_cut2 = prnu.cut_ctr(im_arr, (512, 512, 3))
imgs += [im_cut2]
stg2_ps1 += [prnu.extract_multiple_aligned(imgs, processes=cpu_count())]
stg2_ps1 = np.stack(stg2_ps1, 0)
Computing fingerprints for StyleGAN2 psi=1.0
stg2_ps1.shape
(1, 512, 512)
stg2_ps1
array([[[ 0.24823774, 0.02469793, -0.03474345, ..., 0.6418302 ,
-0.11476105, 0.18489365],
[-0.1565385 , 0.0808139 , 0.25029808, ..., -0.2669261 ,
-0.39518213, 0.07902094],
[ 0.18919197, 0.30362675, -0.32270437, ..., 0.24880326,
-0.03878288, 0.08044937],
...,
[-0.31729034, -0.34563464, 0.02844886, ..., -0.17093077,
0.14621177, -0.00345003],
[-0.2147267 , -0.28933984, -0.17443088, ..., -0.15213154,
-0.1091008 , -0.26366845],
[-0.4549669 , -0.42508656, -0.10889002, ..., -0.25044107,
-0.11401205, -0.25126597]]], dtype=float32)
stg_ps1
array([[[ 0.40432537, 0.39162648, -0.25188863, ..., 0.6978138 ,
0.5953538 , 0.00078652],
[ 0.61472356, 0.12943153, 0.10644206, ..., 0.0662146 ,
-0.44741845, -0.41822878],
[ 0.39647764, 0.4855263 , 0.7520549 , ..., 0.30391923,
-0.1270575 , 0.29227784],
...,
[ 0.12323871, 0.12040471, -0.23154517, ..., -0.10525158,
0.13861366, 0.10875019],
[ 0.37496755, 0.17548351, 0.2793322 , ..., -0.36677593,
-0.0676221 , 0.23318025],
[ 0.14202394, -0.06342326, 0.08185127, ..., -0.510799 ,
-0.22246107, -0.11330815]]], dtype=float32)
print('Computing fingerprints for StyleGAN psi=0.5')
fingerprint_stylegan_psi05 = sorted(np.unique(stylegan_psi05_device))
stg_ps05 = []
for device in fingerprint_stylegan_psi05:
imgs = []
for img_path in stylegan_psi05[stylegan_psi05_device == device]:
im = Image.open(img_path)
#im = im.resize((2000, 3008), Image.ANTIALIAS)
im_arr = np.asarray(im)
if im_arr.dtype != np.uint8:
print('Error while reading image: {}'.format(img_path))
continue
if im_arr.ndim != 3:
print('Image is not RGB: {}'.format(img_path))
continue
im_cut2 = prnu.cut_ctr(im_arr, (512, 512, 3))
imgs += [im_cut2]
stg_ps05 += [prnu.extract_multiple_aligned(imgs, processes=cpu_count())]
stg_ps05 = np.stack(stg_ps05, 0)
Computing fingerprints for StyleGAN psi=0.5
stg_ps05.shape
stg_ps05
array([[[ 0.6241586 , -0.22797832, 0.10719831, ..., 0.75395876,
0.72783816, 1.3660004 ],
[ 0.16785237, -1.6427523 , 0.772056 , ..., -0.42739093,
-0.08630094, 0.08583885],
[ 0.4121152 , 0.10297816, 1.045219 , ..., 0.93081903,
0.39858156, -0.03737947],
...,
[-0.38761693, -0.42904305, 0.06080198, ..., -0.0319345 ,
-0.01931673, -0.09963929],
[-0.326597 , -0.19424908, 0.5021167 , ..., -0.12286449,
-0.3192697 , -0.05525027],
[-0.47375783, -0.7964915 , -0.22155897, ..., -0.49982098,
-0.22038022, 0.03944532]]], dtype=float32)
print('Computing fingerprints for StyleGAN2 psi=0.5')
fingerprint_stylegan2_psi05 = sorted(np.unique(stylegan2_psi05_device))
stg2_ps05 = []
for device in fingerprint_stylegan2_psi05:
imgs = []
for img_path in stylegan2_psi05[stylegan2_psi05_device[:500] == device]:
im = Image.open(img_path)
#im = im.resize((2000, 3008), Image.ANTIALIAS)
im_arr = np.asarray(im)
if im_arr.dtype != np.uint8:
print('Error while reading image: {}'.format(img_path))
continue
if im_arr.ndim != 3:
print('Image is not RGB: {}'.format(img_path))
continue
im_cut2 = prnu.cut_ctr(im_arr, (512, 512, 3))
imgs += [im_cut2]
stg2_ps05 += [prnu.extract_multiple_aligned(imgs, processes=cpu_count())]
stg2_ps05 = np.stack(stg2_ps05, 0)
Computing fingerprints for StyleGAN2 psi=0.5
stg2_ps05.shape
(1, 512, 512)
print('Computing fingerprints')
normal_device = sorted(np.unique(nat_device))
c = []
for device in normal_device:
imgss = []
for img_path in nat_dirlist[nat_device == device]:
im = Image.open(img_path)
#im = im.resize((2000, 3008), Image.ANTIALIAS)
im_arr = np.asarray(im)
if im_arr.dtype != np.uint8:
print('Error while reading image: {}'.format(img_path))
continue
if im_arr.ndim != 3:
print('Image is not RGB: {}'.format(img_path))
continue
im_cut1 = prnu.cut_ctr(im_arr, (512, 512, 3))
imgss += [im_cut1]
c += [prnu.extract_multiple_aligned(imgss, processes=cpu_count())]
c = np.stack(c, 0)
Computing fingerprints
c.shape
(1, 512, 512)
print('Computing residuals')
imgs = []
for img_path in nat_dirlist:
imgs += [prnu.cut_ctr(np.asarray(Image.open(img_path)), (512, 512, 3))]
pool = Pool(cpu_count())
w = pool.map(prnu.extract_single, imgs)
pool.close()
w = np.stack(w, 0)
Computing residuals
w.shape
(500, 512, 512)
print('Computing residuals')
#stylegan_psi1_w=[]
imgs = []
for img_path in stylegan_psi1:
imgs += [prnu.cut_ctr(np.asarray(Image.open(img_path)), (512, 512, 3))]
pool = Pool(cpu_count())
stylegan_psi1_w = pool.map(prnu.extract_single, imgs)
pool.close()
stylegan_psi1_w = np.stack(stylegan_psi1_w, 0)
Computing residuals
print('Computing residuals')
imgs = []
for img_path in stylegan2_psi1:
imgs += [prnu.cut_ctr(np.asarray(Image.open(img_path)), (512, 512, 3))]
pool = Pool(cpu_count())
stylegan2_psi1_w = pool.map(prnu.extract_single, imgs)
pool.close()
stylegan2_psi1_w = np.stack(stylegan2_psi1_w, 0)
Computing residuals
stylegan2_psi1_w.shape
(500, 512, 512)
print('Computing residuals')
imgs = []
for img_path in stylegan_psi05:
imgs += [prnu.cut_ctr(np.asarray(Image.open(img_path)), (512, 512, 3))]
pool = Pool(cpu_count())
stylegan_psi05_w = pool.map(prnu.extract_single, imgs)
pool.close()
stylegan_psi05_w = np.stack(stylegan_psi05_w, 0)
Computing residuals
print('Computing residuals')
imgs = []
for img_path in stylegan2_psi05:
imgs += [prnu.cut_ctr(np.asarray(Image.open(img_path)), (512, 512, 3))]
pool = Pool(cpu_count())
stylegan2_psi05_w = pool.map(prnu.extract_single, imgs)
pool.close()
stylegan2_psi05_w = np.stack(stylegan2_psi05_w, 0)
Computing residuals
stylegan2_psi05_w
array([[[ 2.04616189e-01, -1.01863921e+00, -1.43706226e+00, ...,
1.98888421e+00, -4.84552622e+00, -7.66749680e-01],
[ 6.33472741e-01, -2.06804013e+00, -2.87042427e+00, ...,
4.47351408e+00, -2.58911937e-01, -1.86816227e+00],
[ 2.13606071e+00, -4.17354870e+00, -2.11144710e+00, ...,
-4.53416491e+00, 4.08587551e+00, -4.04173613e+00],
...,
[ 1.15429866e+00, -6.91441357e-01, 1.14656538e-01, ...,
-1.34751067e-01, -1.35562968e+00, -9.05156016e-01],
[ 3.82139653e-01, -1.42756689e+00, -1.47295368e+00, ...,
5.53542256e-01, 1.21271580e-01, -2.99411893e-01],
[-1.01359499e+00, -6.74999416e-01, -2.80121708e+00, ...,
1.20783520e+00, 1.62669623e+00, 1.68624997e-01]],
[[-4.62847650e-01, -2.57518935e+00, 3.77967763e+00, ...,
-2.33853515e-03, -5.82103634e+00, -3.86768103e+00],
[ 4.81626868e-01, 6.59199810e+00, 1.41322911e+00, ...,
4.51318836e+00, 1.44330740e+00, -2.11640763e+00],
[ 5.71519566e+00, -1.73221827e+00, -2.52452445e+00, ...,
-1.38943791e-01, -4.26732123e-01, 2.83087611e-01],
...,
[ 3.32211196e-01, 4.46825504e+00, 5.59985065e+00, ...,
1.47309566e+00, 1.84338796e+00, 6.29830837e-01],
[ 7.48569727e-01, -2.77618349e-01, -9.75789905e-01, ...,
8.58338699e-02, 7.84621298e-01, -7.33662486e-01],
[ 3.88537025e+00, -3.19749385e-01, -2.56510758e+00, ...,
-1.14698482e+00, 1.97455716e+00, 2.41878939e+00]],
[[-1.22459781e+00, 9.25203860e-01, -3.56069207e+00, ...,
-6.88289523e-01, 2.72072101e+00, 2.42560315e+00],
[-2.12065816e+00, -1.31083059e+00, 6.19575642e-02, ...,
-1.00482607e+00, 6.57156825e-01, 3.69390178e+00],
[-2.89822197e+00, 3.91538739e-01, 3.01851583e+00, ...,
3.21430612e+00, -2.96551800e+00, 1.68057680e+00],
...,
[-8.19563568e-01, -1.09326208e+00, 8.15090165e-02, ...,
9.23141614e-02, 8.50044489e-01, -4.49209958e-02],
[-1.07429862e+00, -8.72046173e-01, -1.87510759e-01, ...,
-4.51299906e-01, -3.14396888e-01, -2.45221078e-01],
[-2.53562868e-01, -6.59026027e-01, 5.62391356e-02, ...,
2.85453975e-01, -6.72966421e-01, -7.80801594e-01]],
...,
[[ 4.09202874e-01, -2.16477704e+00, -1.33685732e+00, ...,
6.26661956e-01, -1.69648635e+00, 1.14765966e+00],
[-6.11324608e-01, -1.46263731e+00, 5.02329171e-01, ...,
1.89440024e+00, 3.77088010e-01, -4.81952488e-01],
[-8.16587627e-01, -8.43480527e-01, -6.80629313e-01, ...,
-3.80911082e-01, 7.75632143e-01, 1.43016458e+00],
...,
[-8.62850428e-01, -5.97268455e-02, -2.32076621e+00, ...,
-3.94508451e-01, -1.40238211e-01, 8.14994454e-01],
[-1.88516319e+00, -1.77825320e+00, 2.94450879e+00, ...,
-2.05334887e-01, 1.53180182e-01, 1.34431088e+00],
[-1.46972930e+00, -1.01108992e+00, 2.57260472e-01, ...,
-1.44747949e+00, -1.15590203e+00, 1.05527902e+00]],
[[ 4.03997850e+00, -7.34005630e-01, 2.56313145e-01, ...,
-1.19007275e-01, -6.37884915e-01, -4.53511834e-01],
[ 2.11708620e-01, -7.18083620e-01, -5.61081246e-03, ...,
-1.00378044e-01, -1.42111838e+00, -9.87072945e-01],
[ 2.12071109e+00, 1.58937305e-01, 1.37136483e+00, ...,
-7.05630779e-01, -8.95331025e-01, -6.12974428e-02],
...,
[ 1.64694417e+00, 1.12064087e+00, 2.88810909e-01, ...,
1.93721259e+00, -5.46663463e-01, -2.24808264e+00],
[-1.16505757e-01, -1.03227091e+00, -3.96065295e-01, ...,
9.43170130e-01, -4.81237531e-01, -1.76714098e+00],
[ 6.67893946e-01, -6.65897429e-01, -8.48173082e-01, ...,
1.70052081e-01, -1.04281044e+00, -4.57739019e+00]],
[[-6.04460907e+00, 6.05607557e+00, -2.38166380e+00, ...,
2.98707813e-01, 3.72313666e+00, 1.61746967e+00],
[-3.91551423e+00, 7.60167933e+00, -2.10555935e+00, ...,
3.19988281e-01, 1.98965371e+00, 3.04612070e-01],
[-4.41497594e-01, 4.27943611e+00, -2.46940994e+00, ...,
-3.23498416e+00, 1.48617256e+00, 5.61233521e-01],
...,
[ 9.69780743e-01, 1.23910487e+00, -2.21631631e-01, ...,
-3.57993282e-02, -3.95015627e-01, -3.74649137e-01],
[ 6.81391358e-01, 2.73380876e-01, -4.88212379e-03, ...,
5.01719356e-01, -1.70937300e-01, 5.12737215e-01],
[ 8.33239794e-01, -2.53116101e-01, -8.10810387e-01, ...,
3.98399562e-01, -3.44598472e-01, 1.71689844e+00]]],
dtype=float32)
w.shape
(500, 512, 512)
w[[1]].shape
(1, 512, 512)
stg_ps1.shape
(1, 512, 512)
pd.DataFrame(prnu.aligned_cc(stg_ps05,stg_ps1)['ncc'])
| 0 | |
|---|---|
| 0 | 0.059222 |
stg_ps1_stg2_ps1_corr=pd.DataFrame()
stg_ps1_stg2_ps1_corr=pd.DataFrame()
stg_ps05_stg_ps1_corr=pd.DataFrame()
stg_ps05_stg2_ps05_corr=pd.DataFrame()
stg2_ps05_stg2_ps1_corr=pd.DataFrame()
stg_ps05_stg2_ps1_corr=pd.DataFrame()
stg_ps05_stg_ps05_corr=pd.DataFrame()
stg_ps1_stg_ps1_corr=pd.DataFrame()
stg2_ps05_stg2_ps05_corr=pd.DataFrame()
stg2_ps1_stg2_ps1_corr=pd.DataFrame()
stg_ps05_normal_corr=pd.DataFrame()
stg_ps1_normal_corr=pd.DataFrame()
stg2_ps05_normal_corr=pd.DataFrame()
stg2_ps1_normal_corr=pd.DataFrame()
normal_normal_corr=pd.DataFrame()
stg_ps05_stg_ps05_corr=pd.DataFrame(prnu.aligned_cc(stg_ps05,stylegan_psi05_w[[0]])['ncc'])
stg_ps05_stg_ps05_corr.columns=["Corr Value"]
for i in range(500):
b=pd.DataFrame(prnu.aligned_cc(stg_ps05, stylegan_psi05_w[[i]])['ncc'])
stg_ps05_stg_ps05_corr=stg_ps05_stg_ps05_corr.append(b)
stg_ps05_stg_ps05_corr=stg_ps05_stg_ps05_corr.iloc[1:]
stg_ps05_stg_ps05_corr.columns=["Corr_Value","Sıl"]
stg_ps05_stg_ps05_corr.drop("Sıl",inplace=True,axis=1)
stg_ps05_stg_ps05_corr.Corr_Value.hist()
print("Correlation mean of StyleGan for psi=0.5 - StyleGan psi=0.5 Pic", stg_ps05_stg_ps05_corr.Corr_Value.mean())
Correlation mean of StyleGan for psi=0.5 - StyleGan psi=0.5 Pic 0.04197201
stg_ps1_stg_ps1_corr=pd.DataFrame(prnu.aligned_cc(stg_ps1,stylegan_psi1_w[[0]])['ncc'])
stg_ps1_stg_ps1_corr.columns=["Corr Value"]
for i in range(500):
b=pd.DataFrame(prnu.aligned_cc(stg_ps1, stylegan_psi1_w[[i]])['ncc'])
stg_ps1_stg_ps1_corr=stg_ps1_stg_ps1_corr.append(b)
stg_ps1_stg_ps1_corr=stg_ps1_stg_ps1_corr.iloc[1:]
stg_ps1_stg_ps1_corr.columns=["Corr_Value","Sıl"]
stg_ps1_stg_ps1_corr.drop("Sıl",inplace=True,axis=1)
stg_ps1_stg_ps1_corr.Corr_Value.hist()
print("Correlation mean of StyleGan for psi=1 - StyleGan psi=1 Pic", stg_ps1_stg_ps1_corr.Corr_Value.mean())
Correlation mean of StyleGan for psi=1 - StyleGan psi=1 Pic 0.039275397
stg2_ps05_stg2_ps05_corr=pd.DataFrame(prnu.aligned_cc(stg2_ps05,stylegan2_psi05_w[[0]])['ncc'])
stg2_ps05_stg2_ps05_corr.columns=["Corr Value"]
for i in range(500):
b=pd.DataFrame(prnu.aligned_cc(stg2_ps05, stylegan2_psi05_w[[i]])['ncc'])
stg2_ps05_stg2_ps05_corr=stg2_ps05_stg2_ps05_corr.append(b)
stg2_ps05_stg2_ps05_corr=stg2_ps05_stg2_ps05_corr.iloc[1:]
stg2_ps05_stg2_ps05_corr.columns=["Corr_Value","Sıl"]
stg2_ps05_stg2_ps05_corr.drop("Sıl",inplace=True,axis=1)
stg2_ps05_stg2_ps05_corr.Corr_Value.hist()
print("Correlation mean of StyleGan2 for psi=0.5 - StyleGan2 psi=0.5 Pic", stg2_ps05_stg2_ps05_corr.Corr_Value.mean())
Correlation mean of StyleGan2 for psi=0.5 - StyleGan2 psi=0.5 Pic 0.04182502
stg2_ps1_stg2_ps1_corr=pd.DataFrame(prnu.aligned_cc(stg2_ps1,stylegan2_psi1_w[[0]])['ncc'])
stg2_ps1_stg2_ps1_corr.columns=["Corr Value"]
for i in range(500):
b=pd.DataFrame(prnu.aligned_cc(stg2_ps1, stylegan2_psi1_w[[i]])['ncc'])
stg2_ps1_stg2_ps1_corr=stg2_ps1_stg2_ps1_corr.append(b)
stg2_ps1_stg2_ps1_corr=stg2_ps1_stg2_ps1_corr.iloc[1:]
stg2_ps1_stg2_ps1_corr.columns=["Corr_Value","Sıl"]
stg2_ps1_stg2_ps1_corr.drop("Sıl",inplace=True,axis=1)
stg2_ps1_stg2_ps1_corr.Corr_Value.hist()
print("Correlation mean of StyleGan2 for psi=1 - StyleGan2 psi=1 Pic", stg2_ps1_stg2_ps1_corr.Corr_Value.mean())
Correlation mean of StyleGan2 for psi=1 - StyleGan2 psi=1 Pic 0.03852026
stg_ps1_stg2_ps05_corr=pd.DataFrame(prnu.aligned_cc(stg_ps1,stylegan2_psi05_w[[0]])['ncc'])
stg_ps1_stg2_ps05_corr.columns=["Corr Value"]
for i in range(500):
b=pd.DataFrame(prnu.aligned_cc(stg_ps1, stylegan2_psi05_w[[i]])['ncc'])
stg_ps1_stg2_ps05_corr=stg_ps1_stg2_ps05_corr.append(b)
stg_ps1_stg2_ps05_corr=stg_ps1_stg2_ps05_corr.iloc[1:]
stg_ps1_stg2_ps05_corr.columns=["Corr_Value","Sıl"]
stg_ps1_stg2_ps05_corr.drop("Sıl",inplace=True,axis=1)
stg_ps1_stg2_ps05_corr.Corr_Value.hist()
print("Correlation mean of StyleGan for psi=1 - StyleGan2 psi=0.5 Pic", stg_ps1_stg2_ps05_corr.Corr_Value.mean())
Correlation mean of StyleGan for psi=1 - StyleGan2 psi=0.5 Pic 0.0013079743
stg_ps05_stg_ps1_corr=pd.DataFrame(prnu.aligned_cc(stg_ps05,stylegan_psi1_w[[0]])['ncc'])
stg_ps05_stg_ps1_corr.columns=["Corr Value"]
for i in range(500):
b=pd.DataFrame(prnu.aligned_cc(stg_ps05, stylegan_psi1_w[[i]])['ncc'])
stg_ps05_stg_ps1_corr=stg_ps05_stg_ps1_corr.append(b)
stg_ps05_stg_ps1_corr=stg_ps05_stg_ps1_corr.iloc[1:]
stg_ps05_stg_ps1_corr.columns=["Corr_Value","Sıl"]
stg_ps05_stg_ps1_corr.drop("Sıl",inplace=True,axis=1)
stg_ps05_stg_ps1_corr.Corr_Value.hist()
print("Correlation mean of StyleGan for psi=0.5 - StyleGan psi=1 Pic", stg_ps05_stg_ps1_corr.Corr_Value.mean())
Correlation mean of StyleGan for psi=0.5 - StyleGan psi=1 Pic 0.0024494517
stg_ps05_stg2_ps1_corr=pd.DataFrame(prnu.aligned_cc(stg_ps05,stylegan2_psi1_w[[0]])['ncc'])
stg_ps05_stg_ps1_corr.columns=["Corr Value"]
for i in range(500):
b=pd.DataFrame(prnu.aligned_cc(stg_ps05, stylegan2_psi1_w[[i]])['ncc'])
stg_ps05_stg2_ps1_corr=stg_ps05_stg2_ps1_corr.append(b)
stg_ps05_stg2_ps1_corr=stg_ps05_stg2_ps1_corr.iloc[1:]
stg_ps05_stg2_ps1_corr.columns=["Corr_Value"]
stg_ps05_stg2_ps1_corr.Corr_Value.hist()
print("Correlation mean of StyleGan for psi=0.5 - StyleGan2 psi=1 Pic", stg_ps05_stg2_ps1_corr.Corr_Value.mean())
Correlation mean of StyleGan for psi=0.5 - StyleGan2 psi=1 Pic 0.0008610556
stg2_ps05_stg2_ps1_corr=pd.DataFrame(prnu.aligned_cc(stg2_ps05,stylegan2_psi1_w[[0]])['ncc'])
stg2_ps05_stg2_ps1_corr.columns=["Corr Value"]
for i in range(500):
b=pd.DataFrame(prnu.aligned_cc(stg2_ps05, stylegan2_psi1_w[[i]])['ncc'])
stg2_ps05_stg2_ps1_corr=stg2_ps05_stg2_ps1_corr.append(b)
stg2_ps05_stg2_ps1_corr=stg2_ps05_stg2_ps1_corr.iloc[1:]
stg2_ps05_stg2_ps1_corr.columns=["Corr_Value","Sıl"]
stg2_ps05_stg2_ps1_corr.drop("Sıl",inplace=True,axis=1)
stg2_ps05_stg2_ps1_corr.columns=["Corr_Value"]
stg2_ps05_stg2_ps1_corr.Corr_Value.hist()
print("Correlation mean of StyleGan2 for psi=0.5 - StyleGan2 psi=1 Pic", stg2_ps05_stg2_ps1_corr.Corr_Value.mean())
Correlation mean of StyleGan2 for psi=0.5 - StyleGan2 psi=1 Pic 0.010030724
stg_ps05_stg2_ps05_corr
stg_ps05_stg2_ps05_corr=pd.DataFrame(prnu.aligned_cc(stg_ps05,stylegan2_psi05_w[[0]])['ncc'])
stg_ps05_stg2_ps05_corr.columns=["Corr Value"]
for i in range(500):
b=pd.DataFrame(prnu.aligned_cc(stg_ps05, stylegan2_psi05_w[[i]])['ncc'])
stg_ps05_stg2_ps05_corr=stg_ps05_stg2_ps05_corr.append(b)
stg_ps05_stg2_ps05_corr=stg_ps05_stg2_ps05_corr.iloc[1:]
stg_ps05_stg2_ps05_corr.columns=["Corr_Value","Sıl"]
stg_ps05_stg2_ps05_corr.drop("Sıl",inplace=True,axis=1)
stg_ps05_stg2_ps05_corr.columns=["Corr_Value"]
stg_ps05_stg2_ps05_corr.Corr_Value.hist()
print("Correlation mean of StyleGan for psi=0.5 - StyleGan psi=0.5 Pic", stg_ps05_stg2_ps05_corr.Corr_Value.mean())
Correlation mean of StyleGan for psi=0.5 - StyleGan psi=0.5 Pic 0.0022130124
stg_ps1_stg2_ps1_corr=pd.DataFrame(prnu.aligned_cc(stg_ps1,stylegan2_psi1_w[[0]])['ncc'])
stg_ps1_stg2_ps1_corr.columns=["Corr Value"]
for i in range(500):
b=pd.DataFrame(prnu.aligned_cc(stg_ps1, stylegan2_psi1_w[[i]])['ncc'])
stg_ps1_stg2_ps1_corr=stg_ps1_stg2_ps1_corr.append(b)
stg_ps1_stg2_ps1_corr=stg_ps1_stg2_ps1_corr.iloc[1:]
stg_ps1_stg2_ps1_corr.columns=["Corr_Value","Sıl"]
stg_ps1_stg2_ps1_corr.drop("Sıl",inplace=True,axis=1)
stg_ps1_stg2_ps1_corr.columns=["Corr_Value"]
stg_ps1_stg2_ps1_corr.Corr_Value.hist()
print("Correlation mean of StyleGan for psi=1 - StyleGan2 psi=1 Pic", stg_ps1_stg2_ps1_corr.Corr_Value.mean())
Correlation mean of StyleGan for psi=1 - StyleGan2 psi=1 Pic 0.00071222236
stg_ps05_normal_corr=pd.DataFrame(prnu.aligned_cc(stg_ps05, w[[0]])['ncc'])
stg_ps05_normal_corr.columns=["Corr Value"]
for i in range(500):
b=pd.DataFrame(prnu.aligned_cc(stg_ps05, w[[i]])['ncc'])
stg_ps05_normal_corr=stg_ps05_normal_corr.append(b)
stg_ps05_normal_corr=stg_ps05_normal_corr.iloc[1:]
stg_ps05_normal_corr.columns=["Corr_Value","Sıl"]
stg_ps05_normal_corr.drop("Sıl",inplace=True,axis=1)
stg_ps05_normal_corr.Corr_Value.hist()
print("Correlation mean of StyleGan for psi=0.5 - Normal Pic", stg_ps05_normal_corr.Corr_Value.mean())
Correlation mean of StyleGan for psi=0.5 - Normal Pic 4.2724823e-05
stg_ps1_normal_corr=pd.DataFrame(prnu.aligned_cc(stg_ps1, w[[0]])['ncc'])
stg_ps1_normal_corr.columns=["Corr Value"]
for i in range(500):
b=pd.DataFrame(prnu.aligned_cc(stg_ps1, w[[i]])['ncc'])
stg_ps1_normal_corr=stg_ps1_normal_corr.append(b)
stg_ps1_normal_corr=stg_ps1_normal_corr.iloc[1:]
stg_ps1_normal_corr.columns=["Corr_Value","Sıl"]
stg_ps1_normal_corr.drop("Sıl",inplace=True,axis=1)
stg_ps1_normal_corr.Corr_Value.hist()
print("Correlation mean of StyleGan for psi=1 - Normal Pic", stg_ps1_normal_corr.Corr_Value.mean())
Correlation mean of StyleGan for psi=1 - Normal Pic 0.00028177295
print("- This shows the correlation between 500 normal images and 500 GAN images' average fingerprint.")
stg_ps1_normal_corr
- This shows the correlation between 500 normal images and 500 GAN images' average fingerprint.
| Corr_Value | |
|---|---|
| 0 | -0.001067 |
| 0 | -0.003791 |
| 0 | 0.002194 |
| 0 | 0.001792 |
| 0 | 0.002685 |
| ... | ... |
| 0 | 0.001814 |
| 0 | -0.003174 |
| 0 | -0.001017 |
| 0 | 0.001692 |
| 0 | -0.003464 |
500 rows × 1 columns
stg2_ps05_normal_corr=pd.DataFrame(prnu.aligned_cc(stg2_ps05, w[[0]])['ncc'])
stg2_ps05_normal_corr.columns=["Corr Value"]
for i in range(500):
b=pd.DataFrame(prnu.aligned_cc(stg2_ps05, w[[i]])['ncc'])
stg2_ps05_normal_corr=stg2_ps05_normal_corr.append(b)
stg2_ps05_normal_corr=stg2_ps05_normal_corr.iloc[1:]
stg2_ps05_normal_corr.columns=["Corr_Value","Sıl"]
stg2_ps05_normal_corr.drop("Sıl",inplace=True,axis=1)
stg2_ps05_normal_corr.Corr_Value.hist()
print("Correlation mean of StyleGan2 for psi=0.5 - Normal Pic", stg2_ps05_normal_corr.Corr_Value.mean())
Correlation mean of StyleGan2 for psi=0.5 - Normal Pic -1.344089e-05
stg2_ps05_normal_corr
| Corr_Value | |
|---|---|
| 0 | -0.002844 |
| 0 | -0.001596 |
| 0 | -0.000229 |
| 0 | 0.001705 |
| 0 | 0.001134 |
| ... | ... |
| 0 | -0.001822 |
| 0 | -0.002067 |
| 0 | 0.001924 |
| 0 | 0.000356 |
| 0 | 0.001591 |
500 rows × 1 columns
stg2_ps1_normal_corr=pd.DataFrame(prnu.aligned_cc(stg2_ps1, w[[0]])['ncc'])
stg2_ps1_normal_corr.columns=["Corr Value"]
for i in range(500):
b=pd.DataFrame(prnu.aligned_cc(stg2_ps1, w[[i]])['ncc'])
stg2_ps1_normal_corr=stg2_ps1_normal_corr.append(b)
stg2_ps1_normal_corr=stg2_ps1_normal_corr.iloc[1:]
stg2_ps1_normal_corr.columns=["Corr_Value","Sıl"]
stg2_ps1_normal_corr.drop("Sıl",inplace=True,axis=1)
stg2_ps1_normal_corr.Corr_Value.hist()
print("Correlation mean of StyleGan2 for psi=1 - Normal Pic", stg2_ps1_normal_corr.Corr_Value.mean())
Correlation mean of StyleGan2 for psi=1 - Normal Pic -7.05533e-05
stg2_ps1_normal_corr
| Corr_Value | |
|---|---|
| 0 | -0.006473 |
| 0 | 0.003326 |
| 0 | 0.000535 |
| 0 | -0.002722 |
| 0 | 0.001405 |
| ... | ... |
| 0 | 0.000236 |
| 0 | -0.003573 |
| 0 | 0.002968 |
| 0 | -0.002358 |
| 0 | 0.003772 |
500 rows × 1 columns
normal_normal_corr=pd.DataFrame(prnu.aligned_cc(c, w[[0]])['ncc'])
normal_normal_corr.columns=["Corr Value"]
for i in range(500):
b=pd.DataFrame(prnu.aligned_cc(c, w[[i]])['ncc'])
normal_normal_corr=normal_normal_corr.append(b)
normal_normal_corr=normal_normal_corr.iloc[1:]
normal_normal_corr.columns=["Corr_Value","Sıl"]
normal_normal_corr.drop("Sıl",inplace=True,axis=1)
normal_normal_corr.Corr_Value.hist()
print("Correlation mean of Normal-Normal Pic", normal_normal_corr.Corr_Value.mean())
Correlation mean of Normal-Normal Pic 0.03800994
print("Correlation mean of StyleGan for psi=0.5 - Normal Pic", stg_ps05_normal_corr.Corr_Value.mean())
print("Correlation mean of StyleGan for psi=1 - Normal Pic", stg_ps1_normal_corr.Corr_Value.mean())
print("Correlation mean of StyleGan2 for psi=0.5 - Normal Pic", stg2_ps05_normal_corr.Corr_Value.mean())
print("Correlation mean of StyleGan2 for psi=1 - Normal Pic", stg2_ps1_normal_corr.Corr_Value.mean())
print("Correlation mean of Normal-Normal Pic", normal_normal_corr.Corr_Value.mean())
Correlation mean of StyleGan for psi=0.5 - Normal Pic 4.2724823e-05 Correlation mean of StyleGan for psi=1 - Normal Pic 0.00028177295 Correlation mean of StyleGan2 for psi=0.5 - Normal Pic -1.344089e-05 Correlation mean of StyleGan2 for psi=1 - Normal Pic -7.05533e-05 Correlation mean of Normal-Normal Pic 0.03800994
stg_ps05_stg2_ps05_corr
| Corr_Value | |
|---|---|
| 0 | 0.001487 |
| 0 | -0.001323 |
| 0 | 0.005130 |
| 0 | 0.010521 |
| 0 | -0.005079 |
| ... | ... |
| 0 | -0.002846 |
| 0 | 0.007465 |
| 0 | 0.005167 |
| 0 | 0.001961 |
| 0 | -0.001741 |
500 rows × 1 columns
import seaborn as sns
sns.distplot(stg_ps1_normal_corr["Corr_Value"] , color="blue", label="Normal & StyleGAN psi=1",bins=300)
sns.distplot(stg2_ps1_normal_corr["Corr_Value"] , color="red", label="Normal & StyleGAN2 psi=1",bins=300)
sns.distplot(normal_normal_corr["Corr_Value"] , color="green", label="Normal & Normal",bins=300)
sns.distplot(stg_ps05_stg2_ps05_corr["Corr_Value"] , color="orange", label="Stg_ps05 & Stg2_ps05 ",bins=300)
sns.distplot(stg_ps05_stg2_ps1_corr["Corr_Value"] , color="red", label="Stg_ps05 & Stg2_ps1 ",bins=300)
sns.distplot(stg_ps1_stg2_ps1_corr["Corr_Value"] , color="cyan", label="Stg_ps1 & Stg2_ps1 ",bins=300)
sns.distplot(stg_ps1_stg2_ps05_corr["Corr_Value"] , color="plum", label="Stg_ps1 & Stg2_ps05 ",bins=300)
sns.distplot(stg2_ps05_stg2_ps1_corr["Corr_Value"] , color="black", label="Stg2_ps1 & Stg2_ps05 ",bins=300)
sns.distplot(stg2_ps1_stg2_ps1_corr["Corr_Value"] , color="brown", label="Stg_ps05 & Stg2_ps1 ",bins=300)
sns.distplot(stg2_ps05_stg2_ps05_corr["Corr_Value"] , color="indigo", label="Stg2_ps05 & Stg2_ps05 ",bins=300)
sns.distplot(stg_ps05_stg_ps05_corr["Corr_Value"] , color="sienna", label="Stg_ps05 & Stg_ps05 ",bins=300)
sns.distplot(stg_ps1_stg_ps1_corr["Corr_Value"] , color="palegreen", label="Stg_ps1 & Stg_ps1 ",bins=300)
plt.legend()
plt.title('Histogram of correlations')
C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning) C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning) C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning) C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning) C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning) C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning) C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning) C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning) C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning) C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning) C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning) C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning)
Text(0.5, 1.0, 'Histogram of correlations')
stg_ps05_normal_max = stg_ps05_normal_corr.mean()
stg_ps1_normal_max = stg_ps1_normal_corr.mean()
stg2_ps05_normal_max = stg2_ps05_normal_corr.mean()
stg2_ps1_normal_max = stg2_ps1_normal_corr.mean()
normal_normal_max = normal_normal_corr.mean()
stg_ps1_stg2_ps1_max = stg_ps1_stg2_ps1_corr.mean()
stg_ps1_stg2_ps05_max = stg_ps1_stg2_ps05_corr.mean()
stg_ps05_stg_ps1_max = stg_ps05_stg_ps1_corr.mean()
stg_ps05_stg2_ps05_max = stg_ps05_stg2_ps05_corr.mean()
stg2_ps05_stg2_ps1_max = stg2_ps05_stg2_ps1_corr.mean()
stg_ps05_stg2_ps1_max = stg_ps05_stg2_ps1_corr.mean()
stg_ps05_stg_ps05_max = stg_ps05_stg_ps05_corr.mean()
stg_ps1_stg_ps1_max = stg_ps1_stg_ps1_corr.mean()
stg2_ps05_stg2_ps05_max = stg2_ps05_stg2_ps05_corr.mean()
stg2_ps1_stg2_ps1_max = stg2_ps1_stg2_ps1_corr.mean()
stg_ps05_dt=[stg_ps05_normal_max[0],stg_ps05_stg_ps1_max[0],stg_ps05_stg2_ps05_max[0],stg_ps05_stg2_ps1_max[0]]
stg_ps05_dt=[stg_ps05_stg_ps05_max[0],stg_ps05_normal_max[0],stg_ps05_stg_ps1_max[0],stg_ps05_stg2_ps05_max[0],stg_ps05_stg2_ps1_max[0]]
from pandas import DataFrame
stg_ps05_dt=DataFrame(stg_ps05_dt).T
stg_ps05_dt.columns=["stg_ps05","Normal","stg_ps1","stg2_ps05","stg2_ps1"]
stg_ps05_dt
| stg_ps05 | Normal | stg_ps1 | stg2_ps05 | stg2_ps1 | |
|---|---|---|---|---|---|
| 0 | 0.041972 | 0.000043 | 0.002449 | 0.002213 | 0.000861 |
stg_ps1_dt=[stg_ps05_stg_ps1_max[0],stg_ps1_normal_max[0],stg_ps1_stg_ps1_max[0],stg_ps1_stg2_ps05_max[0],stg_ps1_stg2_ps1_max[0]]
from pandas import DataFrame
stg_ps1_dt=DataFrame(stg_ps1_dt).T
stg_ps1_dt.columns=["stg_ps05","Normal","stg_ps1","stg2_ps05","stg2_ps1"]
stg_ps1_dt
| stg_ps05 | Normal | stg_ps1 | stg2_ps05 | stg2_ps1 | |
|---|---|---|---|---|---|
| 0 | 0.002449 | 0.000282 | 0.039275 | 0.001308 | 0.000712 |
stg2_ps05_normal_max[0]
-1.344089e-05
stg2_ps05_dt=[stg_ps05_stg2_ps05_max[0],stg2_ps05_normal_max[0],stg2_ps05_stg2_ps1_max[0],stg2_ps05_stg2_ps05_max[0],stg2_ps05_stg2_ps1_max[0]]
from pandas import DataFrame
stg2_ps05_dt=DataFrame(stg2_ps05_dt).T
stg2_ps05_dt.columns=["stg_ps05","Normal","stg_ps1","stg2_ps05","stg2_ps1"]
stg2_ps05_dt
| stg_ps05 | Normal | stg_ps1 | stg2_ps05 | stg2_ps1 | |
|---|---|---|---|---|---|
| 0 | 0.002213 | -0.000013 | 0.010031 | 0.041825 | 0.010031 |
stg2_ps1_dt=[stg_ps05_stg2_ps1_max[0],stg2_ps1_normal_max[0],stg_ps1_stg2_ps1_max[0],stg2_ps05_stg2_ps1_max[0],stg2_ps1_stg2_ps1_max[0]]
from pandas import DataFrame
stg2_ps1_dt=DataFrame(stg2_ps1_dt).T
stg2_ps1_dt.columns=["stg_ps05","Normal","stg_ps1","stg2_ps05","stg2_ps1"]
stg2_ps1_dt
| stg_ps05 | Normal | stg_ps1 | stg2_ps05 | stg2_ps1 | |
|---|---|---|---|---|---|
| 0 | 0.000861 | -0.000071 | 0.000712 | 0.010031 | 0.03852 |
normal_dt=[stg_ps05_normal_max[0],normal_normal_max[0],stg_ps1_normal_max[0],stg2_ps05_normal_max[0],stg2_ps1_normal_max[0]]
from pandas import DataFrame
normal_dt=DataFrame(normal_dt).T
normal_dt.columns=["stg_ps05","Normal","stg_ps1","stg2_ps05","stg2_ps1"]
normal_dt
| stg_ps05 | Normal | stg_ps1 | stg2_ps05 | stg2_ps1 | |
|---|---|---|---|---|---|
| 0 | 0.000043 | 0.03801 | 0.000282 | -0.000013 | -0.000071 |
corr_matrix=pd.concat([stg_ps05_dt,normal_dt,stg_ps1_dt,stg2_ps05_dt,stg2_ps1_dt])
ındex=pd.DataFrame(["stg_ps05","normal","stg_ps1","stg2_ps05","stg2_ps1"])
ındex.columns=["Corr"]
ındex.set_index("Corr")
| Corr |
|---|
| stg_ps05 |
| normal |
| stg_ps1 |
| stg2_ps05 |
| stg2_ps1 |
corr_matrix.set_index(ındex["Corr"])
| stg_ps05 | Normal | stg_ps1 | stg2_ps05 | stg2_ps1 | |
|---|---|---|---|---|---|
| Corr | |||||
| stg_ps05 | 0.041972 | 0.000043 | 0.002449 | 0.002213 | 0.000861 |
| normal | 0.000043 | 0.038010 | 0.000282 | -0.000013 | -0.000071 |
| stg_ps1 | 0.002449 | 0.000282 | 0.039275 | 0.001308 | 0.000712 |
| stg2_ps05 | 0.002213 | -0.000013 | 0.010031 | 0.041825 | 0.010031 |
| stg2_ps1 | 0.000861 | -0.000071 | 0.000712 | 0.010031 | 0.038520 |
sns.heatmap(corr_matrix, xticklabels=corr_matrix.columns, yticklabels=corr_matrix.columns,annot=True, center=0,vmin=-1, vmax=1)
<AxesSubplot:>
normal_normal_corr
| Corr_Value | |
|---|---|
| 0 | 0.041360 |
| 0 | 0.033125 |
| 0 | 0.033120 |
| 0 | 0.035139 |
| 0 | 0.061904 |
| ... | ... |
| 0 | 0.039467 |
| 0 | 0.044159 |
| 0 | 0.032853 |
| 0 | 0.059877 |
| 0 | 0.037271 |
500 rows × 1 columns
# Import library and dataset
import seaborn as sns
sns.set(rc={'figure.figsize':(11.7,8.27)})
# Method 1: on the same Axis
sns.distplot(stg_ps05_normal_corr["Corr_Value"] , color="blue", label="StyleGAN psi=0.5",bins=300)
sns.distplot(stg2_ps05_normal_corr["Corr_Value"] , color="red", label="StyleGAN2 psi=0.5",bins=300)
sns.distplot(stg_ps1_normal_corr["Corr_Value"] , color="black", label="StyleGAN psi=1",bins=300)
sns.distplot(stg2_ps1_normal_corr["Corr_Value"] , color="orange", label="StyleGAN2 psi=1",bins=300)
sns.distplot(normal_normal_corr["Corr_Value"] , color="green", label="Normal", bins=300)
plt.legend()
plt.title('Histogram of correlation between Normal Pictures')
plt.xlabel('Corr_Values')
#sns.plt.show()
C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning) C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning) C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning) C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning) C:\Users\user\anaconda3\envs\tf15\lib\site-packages\seaborn\distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning)
Text(0.5, 0, 'Corr_Values')
normal_normal_corr["Label"]=1
stg_ps05_normal_corr.reset_index(inplace=True)
stg_ps05_normal_corr.drop("index",inplace=True,axis=1)
stg2_ps05_normal_corr.reset_index(inplace=True)
stg2_ps05_normal_corr.drop("index",inplace=True,axis=1)
stg2_ps1_normal_corr.reset_index(inplace=True)
stg2_ps1_normal_corr.drop("index",inplace=True,axis=1)
stg_ps1_normal_corr.reset_index(inplace=True)
stg_ps1_normal_corr.drop("index",inplace=True,axis=1)
stg1_2=pd.concat([stg_ps05_normal_corr,stg2_ps05_normal_corr,stg2_ps1_normal_corr,stg_ps1_normal_corr],axis=1)
stg_ps05_normal_corr
| Corr_Value | |
|---|---|
| 0 | -0.000878 |
| 1 | -0.000172 |
| 2 | -0.002831 |
| 3 | -0.000212 |
| 4 | -0.003578 |
| ... | ... |
| 495 | 0.001973 |
| 496 | -0.002398 |
| 497 | 0.002643 |
| 498 | 0.001534 |
| 499 | -0.000134 |
500 rows × 1 columns
stg1_2=pd.concat([stg_ps05_normal_corr,stg2_ps05_normal_corr,stg2_ps1_normal_corr,stg_ps1_normal_corr],axis=1)
stg1_2
| Corr_Value | Corr_Value | Corr_Value | Corr_Value | |
|---|---|---|---|---|
| 0 | -0.000878 | -0.002844 | -0.006473 | -0.001067 |
| 1 | -0.000172 | -0.001596 | 0.003326 | -0.003791 |
| 2 | -0.002831 | -0.000229 | 0.000535 | 0.002194 |
| 3 | -0.000212 | 0.001705 | -0.002722 | 0.001792 |
| 4 | -0.003578 | 0.001134 | 0.001405 | 0.002685 |
| ... | ... | ... | ... | ... |
| 495 | 0.001973 | -0.001822 | 0.000236 | 0.001814 |
| 496 | -0.002398 | -0.002067 | -0.003573 | -0.003174 |
| 497 | 0.002643 | 0.001924 | 0.002968 | -0.001017 |
| 498 | 0.001534 | 0.000356 | -0.002358 | 0.001692 |
| 499 | -0.000134 | 0.001591 | 0.003772 | -0.003464 |
500 rows × 4 columns
stg_mean=pd.DataFrame(stg1_2["Corr_Value"].T.mean())
stg_mean.columns=["Corr_mean"]
stg_mean
| Corr_mean | |
|---|---|
| 0 | -0.002815 |
| 1 | -0.000558 |
| 2 | -0.000083 |
| 3 | 0.000141 |
| 4 | 0.000411 |
| ... | ... |
| 495 | 0.000550 |
| 496 | -0.002803 |
| 497 | 0.001629 |
| 498 | 0.000306 |
| 499 | 0.000441 |
500 rows × 1 columns
normal_normal_corr["Label"]=1
stg_mean["Label"]=0
normal_normal_corr["Prediction"]=1
normal_normal_corr.columns=["Corr_mean","Label","Prediction"]
df=pd.concat([stg_mean,normal_normal_corr],axis=0)
print(df.head())
print(df.shape)
Corr_mean Label Prediction 0 -0.002815 0 NaN 1 -0.000558 0 NaN 2 -0.000083 0 NaN 3 0.000141 0 NaN 4 0.000411 0 NaN (1000, 3)
df["Prediction"]=0
def state_process(cell_value):
if cell_value >0: #Threshold is 0.
return 1
else:
return 0
df.Prediction = df.Corr_mean.apply(state_process)
df
| Corr_mean | Label | Prediction | |
|---|---|---|---|
| 0 | -0.002815 | 0 | 0 |
| 1 | -0.000558 | 0 | 0 |
| 2 | -0.000083 | 0 | 0 |
| 3 | 0.000141 | 0 | 1 |
| 4 | 0.000411 | 0 | 1 |
| ... | ... | ... | ... |
| 0 | 0.039467 | 1 | 1 |
| 0 | 0.044159 | 1 | 1 |
| 0 | 0.032853 | 1 | 1 |
| 0 | 0.059877 | 1 | 1 |
| 0 | 0.037271 | 1 | 1 |
1000 rows × 3 columns
df.Prediction.value_counts()
1 768 0 232 Name: Prediction, dtype: int64
from sklearn.model_selection import cross_val_score
from sklearn import metrics
confusion_matrix=metrics.confusion_matrix(df.Label,df.Prediction)
confusion_matrix
array([[232, 268],
[ 0, 500]], dtype=int64)
auc_roc=metrics.roc_auc_score(df.Label,df.Prediction)
auc_roc
0.732
false_positive_rate, true_positive_rate, thresholds = roc_curve(df["Label"], df["Prediction"])
roc_auc = auc(false_positive_rate, true_positive_rate)
roc_auc
import matplotlib.pyplot as plt
plt.figure(figsize=(10,10))
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate,true_positive_rate, color='red',label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],linestyle='--')
plt.axis('tight')
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
Text(0.5, 0, 'False Positive Rate')
score=[]
threshold=[]
for i in range(-5,50):
i=i/1000
threshold.append(i)
def state_process(cell_value):
if cell_value >i:
return 1
else:
return 0
df.Prediction = df.Corr_mean.apply(state_process)
auc_roc=metrics.roc_auc_score(df.Label,df.Prediction)
score.append(auc_roc)
th_score=pd.DataFrame([score,threshold]).T
th_score.columns=["score","Threshold"]
plt.plot(th_score.Threshold,th_score.score, color='red')
plt.legend(loc = 'lower right')
plt.axis('tight')
plt.ylabel('AUC Score')
plt.xlabel('Threshold')
No handles with labels found to put in legend.
Text(0.5, 0, 'Threshold')
plt.margins(x=-0.1, y=0.010) # Values in (-0.5, 0.0) zooms in to center
plt.plot(th_score.Threshold, (th_score.score))
[<matplotlib.lines.Line2D at 0x1269e19a888>]
stylegan2_psi1 = np.array(sorted(glob(r'test\data\stylegan2_psi1.0\*.PNG')))
stylegan2_psi1_device = np.array([os.path.split(i)[1].split('0',1 )[0] for i in stylegan2_psi1])[:600]
sample=stylegan2_psi1[599:600]
sample
array(['test\\data\\stylegan2_psi1.0\\000599.png'], dtype='<U37')
from PIL import Image
basewidth = 512
img_sample = Image.open(sample[0])
wpercent = (basewidth/float(img_sample.size[0]))
hsize = int((float(img_sample.size[1])*float(wpercent)))
img_sample = img_sample.resize((basewidth,hsize), Image.ANTIALIAS)
img_sample
sample[0]
'test\\data\\stylegan2_psi1.0\\000599.png'
sample_img=prnu.extract_single(np.array(img_sample)) #extract sıngle pıcture resıduals
sample_img = np.stack(sample_img, 0)
print(sample_img.shape)
from numpy import zeros, newaxis
sample_img = sample_img[newaxis,:, :]
print(sample_img.shape)
(512, 512) (1, 512, 512)
pred=pd.DataFrame(prnu.aligned_cc(sample_img,c)['ncc']) #ncc between test resıduals and normal pıctures fıngerprınts
print(pred, "--> This value is mean of NCC of 500 normal pictures fingerprints and test picture's residuals.")
0 0 -0.003205 --> This value is mean of NCC of 500 normal pictures fingerprints and test picture's residuals.
if pred.values >0.001: #threshold
print("Prediction is Normal Picture")
else:
print("Prediction is GAN Picture")
Prediction is GAN Picture
cat = np.array(sorted(glob(r'test\data\stylegan2_psi1.0\000918.PNG')))
basewidth = 512
img_sample_cat = Image.open(cat[0])
wpercent = (basewidth/float(img_sample_cat.size[0]))
hsize = 512
img_sample_cat = img_sample_cat.resize((basewidth,hsize), Image.ANTIALIAS)
img_sample_cat
np.array(img_sample_cat).shape
(512, 512, 3)
sample_res=prnu.extract_single(np.array(img_sample_cat)) #extract sıngle pıcture resıduals
sample_res = np.stack(sample_res, 0)
print(sample_res.shape)
from numpy import zeros, newaxis
sample_res = sample_res[newaxis,:, :]
pred=pd.DataFrame(prnu.aligned_cc(sample_res,c)['ncc']) #ncc between test resıduals and normal pıctures fıngerprınts
print(pred, "--> This value is mean of NCC of 500 normal pictures fingerprints and test picture's residuals.")
(512, 512)
0
0 -0.004422 --> This value is mean of NCC of 500 normal pictures fingerprints and test picture's residuals.
if pred.values >0.001: #threshold
print("Prediction is Normal Picture")
else:
print("Prediction is GAN Picture")
Prediction is GAN Picture
cat = np.array(sorted(glob(r'test\data\stylegan2_psi1.0\000938.PNG')))
basewidth = 512
img_sample_cat = Image.open(cat[0])
wpercent = (basewidth/float(img_sample_cat.size[0]))
hsize = 512
img_sample_cat = img_sample_cat.resize((basewidth,hsize), Image.ANTIALIAS)
img_sample_cat
sample_res=prnu.extract_single(np.array(img_sample_cat)) #extract sıngle pıcture resıduals
sample_res = np.stack(sample_res, 0)
print(sample_res.shape)
from numpy import zeros, newaxis
sample_res = sample_res[newaxis,:, :]
pred=pd.DataFrame(prnu.aligned_cc(sample_res,c)['ncc']) #ncc between test resıduals and normal pıctures fıngerprınts
print(pred, "--> This value is mean of NCC of 500 normal pictures fingerprints and test picture's residuals.")
if pred.values >0.001: #threshold
print("Prediction is Normal Picture")
else:
print("Prediction is GAN Picture")
(512, 512)
0
0 -0.002328 --> This value is mean of NCC of 500 normal pictures fingerprints and test picture's residuals.
Prediction is GAN Picture
cat = np.array(sorted(glob(r'test\data\archieve2\53122.PNG')))
basewidth = 512
img_sample_cat = Image.open(cat[0])
wpercent = (basewidth/float(img_sample_cat.size[0]))
hsize = 512
img_sample_cat = img_sample_cat.resize((basewidth,hsize), Image.ANTIALIAS)
img_sample_cat
sample_res=prnu.extract_single(np.array(img_sample_cat)) #extract sıngle pıcture resıduals
sample_res = np.stack(sample_res, 0)
print(sample_res.shape)
from numpy import zeros, newaxis
sample_res = sample_res[newaxis,:, :]
pred=pd.DataFrame(prnu.aligned_cc(sample_res,c)['ncc']) #ncc between test resıduals and normal pıctures fıngerprınts
print(pred, "--> This value is mean of NCC of 500 normal pictures fingerprints and test picture's residuals.")
if pred.values >0.005: #threshold
print("Prediction is Normal Picture")
else:
print("Prediction is GAN Picture")
(512, 512)
0
0 0.002004 --> This value is mean of NCC of 500 normal pictures fingerprints and test picture's residuals.
Prediction is GAN Picture
cat = np.array(sorted(glob(r'test\data\mr_ali2.JPG')))
basewidth = 512
img_sample_cat = Image.open(cat[0])
wpercent = (basewidth/float(img_sample_cat.size[0]))
hsize = 512
img_sample_cat = img_sample_cat.resize((basewidth,hsize), Image.ANTIALIAS)
img_sample_cat
sample_res=prnu.extract_single(np.array(img_sample_cat)) #extract sıngle pıcture resıduals
sample_res = np.stack(sample_res, 0)
print(sample_res.shape)
from numpy import zeros, newaxis
sample_res = sample_res[newaxis,:, :]
pred=pd.DataFrame(prnu.aligned_cc(sample_res,c)['ncc']) #ncc between test resıduals and normal pıctures fıngerprınts
print(pred, "--> This value is mean of NCC of 500 normal pictures fingerprints and test picture's residuals.")
if pred.values >0.001: #threshold
print("Prediction is Normal Picture")
else:
print("Prediction is GAN Picture")
(512, 512)
0
0 0.002741 --> This value is mean of NCC of 500 normal pictures fingerprints and test picture's residuals.
Prediction is Normal Picture
cat = np.array(sorted(glob(r'test\data\test\normal\05457.PNG')))
basewidth = 512
img_sample_cat = Image.open(cat[0])
wpercent = (basewidth/float(img_sample_cat.size[0]))
hsize = 512
img_sample_cat = img_sample_cat.resize((basewidth,hsize), Image.ANTIALIAS)
img_sample_cat
sample_res=prnu.extract_single(np.array(img_sample_cat)) #extract sıngle pıcture resıduals
sample_res = np.stack(sample_res, 0)
print(sample_res.shape)
from numpy import zeros, newaxis
sample_res = sample_res[newaxis,:, :]
pred=pd.DataFrame(prnu.aligned_cc(sample_res,c)['ncc']) #ncc between test resıduals and normal pıctures fıngerprınts
print(pred, "--> This value is mean of NCC of 500 normal pictures fingerprints and test picture's residuals.")
if pred.values >0.001: #threshold
print("Prediction is Normal Picture")
else:
print("Prediction is GAN Picture")
(512, 512)
0
0 0.001506 --> This value is mean of NCC of 500 normal pictures fingerprints and test picture's residuals.
Prediction is Normal Picture
sonucc=pd.DataFrame()
sonucc["Predıctıon"]=0
sonucc
| Predıctıon |
|---|
if pred.values >0.001: #threshold
sonucc["Predıctıon"].loc[0]=0
else:
sonucc["Predıctıon"].loc[0]=1
sonucc["Predıctıon"].loc[1]=1
sonucc
| Predıctıon |
|---|
pred.append(pred)
| 0 | |
|---|---|
| 0 | 0.001192 |
| 0 | 0.001192 |
nat_dirlist_test = np.array(sorted(glob(r'test\data\test\normal\*.PNG')))[:300]
nat_device_test = np.array([os.path.split(i)[1].split('0', 1)[0] for i in nat_dirlist_test])[:300]
stylegan_psi05_test = np.array(sorted(glob(r'test\data\test\stylegan_psi0.5\*.PNG')))[:300]
stylegan_psi05_device_test = np.array([os.path.split(i)[1].rsplit('_', 1)[0] for i in stylegan_psi05_test])[:300]
stylegan_psi1_test = np.array(sorted(glob(r'test\data\test\stylegan_psi1.0\*.PNG')))[:300]
stylegan_psi1_device_test = np.array([os.path.split(i)[1].rsplit('_', 1)[0] for i in stylegan_psi1_test])[:300]
stylegan2_psi05_test = np.array(sorted(glob(r'test\data\test\stylegan2_psi0.5\*.PNG')))[:300]
stylegan2_psi05_device_test = np.array([os.path.split(i)[1].split('0', 1)[0] for i in stylegan2_psi05_test])[:300]
stylegan2_psi05_test=stylegan2_psi05_test[:300]
stylegan2_psi1_test = np.array(sorted(glob(r'test\data\test\stylegan2_psi1.0\*.PNG')))[:300]
stylegan2_psi1_device_test = np.array([os.path.split(i)[1].split('0',1 )[0] for i in stylegan2_psi1_test])[:300]
stylegan2_psi1_test=stylegan2_psi1_test[:300]
nat_dirlist_test[3]
'test\\data\\test\\normal\\05307.png'
predıctıon_values=[]
corr=[]
for i in range(0,299):
img_sample_cat = nat_dirlist_test[i]
basewidth = 512
img_sample_cat = Image.open(img_sample_cat)
wpercent = (basewidth/float(img_sample_cat.size[0]))
hsize = 512
img_sample_cat = img_sample_cat.resize((basewidth,hsize), Image.ANTIALIAS)
sample_res=prnu.extract_single(np.array(img_sample_cat)) #extract sıngle pıcture resıduals
sample_res = np.stack(sample_res, 0)
from numpy import zeros, newaxis
sample_res = sample_res[newaxis,:, :]
pred=pd.DataFrame(prnu.aligned_cc(sample_res,c)['ncc']) #ncc between test resıduals and normal pıctures fıngerprınts
if pred.values >0.001: #threshold
predıctıon_values.append(0)
corr.append(pred.values)
else:
predıctıon_values.append(1)
corr.append(pred.values)
predıctıon_values=pd.DataFrame(predıctıon_values)
predıctıon_values["label"]=1
predıctıon_values.columns=["Pred","label"]
predıctıon_values.head()
from sklearn.metrics import accuracy_score
print("Normal Accuracy Score:",accuracy_score(predıctıon_values.label,predıctıon_values.Pred))
print("Normal correlation mean: ", np.mean(corr))
Normal Accuracy Score: 0.5986622073578596 Normal correlation mean: 0.00022095379
predıctıon_values=[]
corr=[]
for i in range(0,299):
img_sample_cat = stylegan_psi05_test[i]
basewidth = 512
img_sample_cat = Image.open(img_sample_cat)
wpercent = (basewidth/float(img_sample_cat.size[0]))
hsize = 512
img_sample_cat = img_sample_cat.resize((basewidth,hsize), Image.ANTIALIAS)
sample_res=prnu.extract_single(np.array(img_sample_cat)) #extract sıngle pıcture resıduals
sample_res = np.stack(sample_res, 0)
from numpy import zeros, newaxis
sample_res = sample_res[newaxis,:, :]
pred=pd.DataFrame(prnu.aligned_cc(sample_res,c)['ncc']) #ncc between test resıduals and normal pıctures fıngerprınts
if pred.values >0.001: #threshold
predıctıon_values.append(0)
corr.append(pred.values)
else:
predıctıon_values.append(1)
corr.append(pred.values)
predıctıon_values=pd.DataFrame(predıctıon_values)
predıctıon_values["label"]=1
predıctıon_values.columns=["Pred","label"]
predıctıon_values.head()
from sklearn.metrics import accuracy_score
print("Style GAN psi05 Accuracy Score:",accuracy_score(predıctıon_values.label,predıctıon_values.Pred))
print("Style GAN psi05 correlation mean: ", np.mean(corr))
Style GAN psi05 Accuracy Score: 0.5986622073578596 Style GAN psi05 correlation mean: 0.00030151452
predıctıon_values=[]
corr=[]
for i in range(0,299):
img_sample_cat = stylegan_psi1_test[i]
basewidth = 512
img_sample_cat = Image.open(img_sample_cat)
wpercent = (basewidth/float(img_sample_cat.size[0]))
hsize = 512
img_sample_cat = img_sample_cat.resize((basewidth,hsize), Image.ANTIALIAS)
sample_res=prnu.extract_single(np.array(img_sample_cat)) #extract sıngle pıcture resıduals
sample_res = np.stack(sample_res, 0)
from numpy import zeros, newaxis
sample_res = sample_res[newaxis,:, :]
pred=pd.DataFrame(prnu.aligned_cc(sample_res,c)['ncc']) #ncc between test resıduals and normal pıctures fıngerprınts
if pred.values >0.001: #threshold
predıctıon_values.append(0)
corr.append(pred.values)
else:
predıctıon_values.append(1)
corr.append(pred.values)
predıctıon_values=pd.DataFrame(predıctıon_values)
predıctıon_values["label"]=1
predıctıon_values.columns=["Pred","label"]
predıctıon_values.head()
from sklearn.metrics import accuracy_score
print("Style GAN psi1 Accuracy Score:",accuracy_score(predıctıon_values.label,predıctıon_values.Pred))
print("Style GAN psi1 correlation mean: ", np.mean(corr))
Style GAN psi1 Accuracy Score: 0.6555183946488294 Style GAN psi1 correlation mean: 7.933077e-05
predıctıon_values=[]
corr=[]
for i in range(0,299):
img_sample_cat = stylegan2_psi05_test[i]
basewidth = 512
img_sample_cat = Image.open(img_sample_cat)
wpercent = (basewidth/float(img_sample_cat.size[0]))
hsize = 512
img_sample_cat = img_sample_cat.resize((basewidth,hsize), Image.ANTIALIAS)
sample_res=prnu.extract_single(np.array(img_sample_cat)) #extract sıngle pıcture resıduals
sample_res = np.stack(sample_res, 0)
from numpy import zeros, newaxis
sample_res = sample_res[newaxis,:, :]
pred=pd.DataFrame(prnu.aligned_cc(sample_res,c)['ncc']) #ncc between test resıduals and normal pıctures fıngerprınts
if pred.values >0.001: #threshold
predıctıon_values.append(0)
corr.append(pred.values)
else:
predıctıon_values.append(1)
corr.append(pred.values)
predıctıon_values=pd.DataFrame(predıctıon_values)
predıctıon_values["label"]=1
predıctıon_values.columns=["Pred","label"]
predıctıon_values.head()
from sklearn.metrics import accuracy_score
print("Style GAN2 psi05 Accuracy Score:",accuracy_score(predıctıon_values.label,predıctıon_values.Pred))
print("Style GAN2 psi05 correlation mean: ", np.mean(corr))
Style GAN2 psi05 Accuracy Score: 0.5451505016722408 Style GAN2 psi05 correlation mean: 0.00067534804
predıctıon_values=[]
corr=[]
for i in range(0,299):
img_sample_cat = stylegan2_psi1_test[i]
basewidth = 512
img_sample_cat = Image.open(img_sample_cat)
wpercent = (basewidth/float(img_sample_cat.size[0]))
hsize = 512
img_sample_cat = img_sample_cat.resize((basewidth,hsize), Image.ANTIALIAS)
sample_res=prnu.extract_single(np.array(img_sample_cat)) #extract sıngle pıcture resıduals
sample_res = np.stack(sample_res, 0)
from numpy import zeros, newaxis
sample_res = sample_res[newaxis,:, :]
pred=pd.DataFrame(prnu.aligned_cc(sample_res,c)['ncc']) #ncc between test resıduals and normal pıctures fıngerprınts
if pred.values >0.001: #threshold
predıctıon_values.append(0)
corr.append(pred.values)
else:
predıctıon_values.append(1)
corr.append(pred.values)
predıctıon_values=pd.DataFrame(predıctıon_values)
predıctıon_values["label"]=1
predıctıon_values.columns=["Pred","label"]
predıctıon_values.head()
from sklearn.metrics import accuracy_score
print("Style GAN2 psi1 Accuracy Score:",accuracy_score(predıctıon_values.label,predıctıon_values.Pred))
print("Style GAN2 psi1 correlation mean: ", np.mean(corr))
Style GAN2 psi1 Accuracy Score: 0.5953177257525084 Style GAN2 psi1 correlation mean: 0.00025719954
nat_dirlist_test
array(['test\\data\\test\\normal\\05304.png',
'test\\data\\test\\normal\\05305.png',
'test\\data\\test\\normal\\05306.png',
'test\\data\\test\\normal\\05307.png',
'test\\data\\test\\normal\\05308.png',
'test\\data\\test\\normal\\05309.png',
'test\\data\\test\\normal\\05310.png',
'test\\data\\test\\normal\\05311.png',
'test\\data\\test\\normal\\05312.png',
'test\\data\\test\\normal\\05313.png',
'test\\data\\test\\normal\\05314.png',
'test\\data\\test\\normal\\05315.png',
'test\\data\\test\\normal\\05316.png',
'test\\data\\test\\normal\\05317.png',
'test\\data\\test\\normal\\05318.png',
'test\\data\\test\\normal\\05319.png',
'test\\data\\test\\normal\\05320.png',
'test\\data\\test\\normal\\05321.png',
'test\\data\\test\\normal\\05322.png',
'test\\data\\test\\normal\\05323.png',
'test\\data\\test\\normal\\05324.png',
'test\\data\\test\\normal\\05325.png',
'test\\data\\test\\normal\\05326.png',
'test\\data\\test\\normal\\05327.png',
'test\\data\\test\\normal\\05328.png',
'test\\data\\test\\normal\\05329.png',
'test\\data\\test\\normal\\05330.png',
'test\\data\\test\\normal\\05331.png',
'test\\data\\test\\normal\\05332.png',
'test\\data\\test\\normal\\05333.png',
'test\\data\\test\\normal\\05334.png',
'test\\data\\test\\normal\\05335.png',
'test\\data\\test\\normal\\05336.png',
'test\\data\\test\\normal\\05337.png',
'test\\data\\test\\normal\\05338.png',
'test\\data\\test\\normal\\05339.png',
'test\\data\\test\\normal\\05340.png',
'test\\data\\test\\normal\\05341.png',
'test\\data\\test\\normal\\05342.png',
'test\\data\\test\\normal\\05343.png',
'test\\data\\test\\normal\\05344.png',
'test\\data\\test\\normal\\05345.png',
'test\\data\\test\\normal\\05346.png',
'test\\data\\test\\normal\\05347.png',
'test\\data\\test\\normal\\05348.png',
'test\\data\\test\\normal\\05349.png',
'test\\data\\test\\normal\\05350.png',
'test\\data\\test\\normal\\05351.png',
'test\\data\\test\\normal\\05352.png',
'test\\data\\test\\normal\\05353.png',
'test\\data\\test\\normal\\05354.png',
'test\\data\\test\\normal\\05355.png',
'test\\data\\test\\normal\\05356.png',
'test\\data\\test\\normal\\05357.png',
'test\\data\\test\\normal\\05358.png',
'test\\data\\test\\normal\\05359.png',
'test\\data\\test\\normal\\05360.png',
'test\\data\\test\\normal\\05361.png',
'test\\data\\test\\normal\\05362.png',
'test\\data\\test\\normal\\05363.png',
'test\\data\\test\\normal\\05364.png',
'test\\data\\test\\normal\\05365.png',
'test\\data\\test\\normal\\05366.png',
'test\\data\\test\\normal\\05367.png',
'test\\data\\test\\normal\\05368.png',
'test\\data\\test\\normal\\05369.png',
'test\\data\\test\\normal\\05370.png',
'test\\data\\test\\normal\\05371.png',
'test\\data\\test\\normal\\05372.png',
'test\\data\\test\\normal\\05373.png',
'test\\data\\test\\normal\\05374.png',
'test\\data\\test\\normal\\05375.png',
'test\\data\\test\\normal\\05376.png',
'test\\data\\test\\normal\\05377.png',
'test\\data\\test\\normal\\05378.png',
'test\\data\\test\\normal\\05379.png',
'test\\data\\test\\normal\\05380.png',
'test\\data\\test\\normal\\05381.png',
'test\\data\\test\\normal\\05382.png',
'test\\data\\test\\normal\\05383.png',
'test\\data\\test\\normal\\05384.png',
'test\\data\\test\\normal\\05385.png',
'test\\data\\test\\normal\\05386.png',
'test\\data\\test\\normal\\05387.png',
'test\\data\\test\\normal\\05388.png',
'test\\data\\test\\normal\\05389.png',
'test\\data\\test\\normal\\05390.png',
'test\\data\\test\\normal\\05391.png',
'test\\data\\test\\normal\\05392.png',
'test\\data\\test\\normal\\05393.png',
'test\\data\\test\\normal\\05394.png',
'test\\data\\test\\normal\\05395.png',
'test\\data\\test\\normal\\05396.png',
'test\\data\\test\\normal\\05397.png',
'test\\data\\test\\normal\\05398.png',
'test\\data\\test\\normal\\05399.png',
'test\\data\\test\\normal\\05400.png',
'test\\data\\test\\normal\\05401.png',
'test\\data\\test\\normal\\05402.png',
'test\\data\\test\\normal\\05403.png',
'test\\data\\test\\normal\\05404.png',
'test\\data\\test\\normal\\05405.png',
'test\\data\\test\\normal\\05406.png',
'test\\data\\test\\normal\\05407.png',
'test\\data\\test\\normal\\05408.png',
'test\\data\\test\\normal\\05409.png',
'test\\data\\test\\normal\\05410.png',
'test\\data\\test\\normal\\05411.png',
'test\\data\\test\\normal\\05412.png',
'test\\data\\test\\normal\\05413.png',
'test\\data\\test\\normal\\05414.png',
'test\\data\\test\\normal\\05415.png',
'test\\data\\test\\normal\\05416.png',
'test\\data\\test\\normal\\05417.png',
'test\\data\\test\\normal\\05418.png',
'test\\data\\test\\normal\\05419.png',
'test\\data\\test\\normal\\05420.png',
'test\\data\\test\\normal\\05421.png',
'test\\data\\test\\normal\\05422.png',
'test\\data\\test\\normal\\05423.png',
'test\\data\\test\\normal\\05424.png',
'test\\data\\test\\normal\\05425.png',
'test\\data\\test\\normal\\05426.png',
'test\\data\\test\\normal\\05427.png',
'test\\data\\test\\normal\\05428.png',
'test\\data\\test\\normal\\05429.png',
'test\\data\\test\\normal\\05430.png',
'test\\data\\test\\normal\\05431.png',
'test\\data\\test\\normal\\05432.png',
'test\\data\\test\\normal\\05433.png',
'test\\data\\test\\normal\\05434.png',
'test\\data\\test\\normal\\05435.png',
'test\\data\\test\\normal\\05436.png',
'test\\data\\test\\normal\\05437.png',
'test\\data\\test\\normal\\05438.png',
'test\\data\\test\\normal\\05439.png',
'test\\data\\test\\normal\\05440.png',
'test\\data\\test\\normal\\05441.png',
'test\\data\\test\\normal\\05442.png',
'test\\data\\test\\normal\\05443.png',
'test\\data\\test\\normal\\05444.png',
'test\\data\\test\\normal\\05445.png',
'test\\data\\test\\normal\\05446.png',
'test\\data\\test\\normal\\05447.png',
'test\\data\\test\\normal\\05448.png',
'test\\data\\test\\normal\\05449.png',
'test\\data\\test\\normal\\05450.png',
'test\\data\\test\\normal\\05451.png',
'test\\data\\test\\normal\\05452.png',
'test\\data\\test\\normal\\05453.png',
'test\\data\\test\\normal\\05454.png',
'test\\data\\test\\normal\\05455.png',
'test\\data\\test\\normal\\05456.png',
'test\\data\\test\\normal\\05457.png',
'test\\data\\test\\normal\\05458.png',
'test\\data\\test\\normal\\05459.png',
'test\\data\\test\\normal\\05460.png',
'test\\data\\test\\normal\\05461.png',
'test\\data\\test\\normal\\05462.png',
'test\\data\\test\\normal\\05463.png',
'test\\data\\test\\normal\\05464.png',
'test\\data\\test\\normal\\05465.png',
'test\\data\\test\\normal\\05466.png',
'test\\data\\test\\normal\\05467.png',
'test\\data\\test\\normal\\05468.png',
'test\\data\\test\\normal\\05469.png',
'test\\data\\test\\normal\\05470.png',
'test\\data\\test\\normal\\05471.png',
'test\\data\\test\\normal\\05472.png',
'test\\data\\test\\normal\\05473.png',
'test\\data\\test\\normal\\05474.png',
'test\\data\\test\\normal\\05475.png',
'test\\data\\test\\normal\\05476.png',
'test\\data\\test\\normal\\05477.png',
'test\\data\\test\\normal\\05478.png',
'test\\data\\test\\normal\\05479.png',
'test\\data\\test\\normal\\05480.png',
'test\\data\\test\\normal\\05481.png',
'test\\data\\test\\normal\\05482.png',
'test\\data\\test\\normal\\05483.png',
'test\\data\\test\\normal\\05484.png',
'test\\data\\test\\normal\\05485.png',
'test\\data\\test\\normal\\05486.png',
'test\\data\\test\\normal\\05487.png',
'test\\data\\test\\normal\\05488.png',
'test\\data\\test\\normal\\05489.png',
'test\\data\\test\\normal\\05490.png',
'test\\data\\test\\normal\\05491.png',
'test\\data\\test\\normal\\05492.png',
'test\\data\\test\\normal\\05493.png',
'test\\data\\test\\normal\\05494.png',
'test\\data\\test\\normal\\05495.png',
'test\\data\\test\\normal\\05496.png',
'test\\data\\test\\normal\\05497.png',
'test\\data\\test\\normal\\05498.png',
'test\\data\\test\\normal\\05499.png',
'test\\data\\test\\normal\\05500.png',
'test\\data\\test\\normal\\05501.png',
'test\\data\\test\\normal\\05502.png',
'test\\data\\test\\normal\\05503.png',
'test\\data\\test\\normal\\05504.png',
'test\\data\\test\\normal\\05505.png',
'test\\data\\test\\normal\\05506.png',
'test\\data\\test\\normal\\05507.png',
'test\\data\\test\\normal\\05508.png',
'test\\data\\test\\normal\\05509.png',
'test\\data\\test\\normal\\05510.png',
'test\\data\\test\\normal\\05511.png',
'test\\data\\test\\normal\\05512.png',
'test\\data\\test\\normal\\05513.png',
'test\\data\\test\\normal\\05514.png',
'test\\data\\test\\normal\\05515.png',
'test\\data\\test\\normal\\05516.png',
'test\\data\\test\\normal\\05517.png',
'test\\data\\test\\normal\\05518.png',
'test\\data\\test\\normal\\05519.png',
'test\\data\\test\\normal\\05520.png',
'test\\data\\test\\normal\\05521.png',
'test\\data\\test\\normal\\05522.png',
'test\\data\\test\\normal\\05523.png',
'test\\data\\test\\normal\\05524.png',
'test\\data\\test\\normal\\05525.png',
'test\\data\\test\\normal\\05526.png',
'test\\data\\test\\normal\\05527.png',
'test\\data\\test\\normal\\05528.png',
'test\\data\\test\\normal\\05529.png',
'test\\data\\test\\normal\\05530.png',
'test\\data\\test\\normal\\05531.png',
'test\\data\\test\\normal\\05532.png',
'test\\data\\test\\normal\\05533.png',
'test\\data\\test\\normal\\05534.png',
'test\\data\\test\\normal\\05535.png',
'test\\data\\test\\normal\\05536.png',
'test\\data\\test\\normal\\05537.png',
'test\\data\\test\\normal\\05538.png',
'test\\data\\test\\normal\\05539.png',
'test\\data\\test\\normal\\05540.png',
'test\\data\\test\\normal\\05541.png',
'test\\data\\test\\normal\\05542.png',
'test\\data\\test\\normal\\05543.png',
'test\\data\\test\\normal\\05544.png',
'test\\data\\test\\normal\\05545.png',
'test\\data\\test\\normal\\05546.png',
'test\\data\\test\\normal\\05547.png',
'test\\data\\test\\normal\\05548.png',
'test\\data\\test\\normal\\05549.png',
'test\\data\\test\\normal\\05550.png',
'test\\data\\test\\normal\\05551.png',
'test\\data\\test\\normal\\05552.png',
'test\\data\\test\\normal\\05553.png',
'test\\data\\test\\normal\\05554.png',
'test\\data\\test\\normal\\05555.png',
'test\\data\\test\\normal\\05556.png',
'test\\data\\test\\normal\\05557.png',
'test\\data\\test\\normal\\05558.png',
'test\\data\\test\\normal\\05559.png',
'test\\data\\test\\normal\\05560.png',
'test\\data\\test\\normal\\05561.png',
'test\\data\\test\\normal\\05562.png',
'test\\data\\test\\normal\\05563.png',
'test\\data\\test\\normal\\05564.png',
'test\\data\\test\\normal\\05565.png',
'test\\data\\test\\normal\\05566.png',
'test\\data\\test\\normal\\05567.png',
'test\\data\\test\\normal\\05568.png',
'test\\data\\test\\normal\\05569.png',
'test\\data\\test\\normal\\05570.png',
'test\\data\\test\\normal\\05571.png',
'test\\data\\test\\normal\\05572.png',
'test\\data\\test\\normal\\05573.png',
'test\\data\\test\\normal\\05574.png',
'test\\data\\test\\normal\\05575.png',
'test\\data\\test\\normal\\05576.png',
'test\\data\\test\\normal\\05577.png',
'test\\data\\test\\normal\\05578.png',
'test\\data\\test\\normal\\05579.png',
'test\\data\\test\\normal\\05580.png',
'test\\data\\test\\normal\\05581.png',
'test\\data\\test\\normal\\05582.png',
'test\\data\\test\\normal\\05583.png',
'test\\data\\test\\normal\\05584.png',
'test\\data\\test\\normal\\05585.png',
'test\\data\\test\\normal\\05586.png',
'test\\data\\test\\normal\\05587.png',
'test\\data\\test\\normal\\05588.png',
'test\\data\\test\\normal\\05589.png',
'test\\data\\test\\normal\\05590.png',
'test\\data\\test\\normal\\05591.png',
'test\\data\\test\\normal\\05592.png',
'test\\data\\test\\normal\\05593.png',
'test\\data\\test\\normal\\05594.png',
'test\\data\\test\\normal\\05595.png',
'test\\data\\test\\normal\\05596.png',
'test\\data\\test\\normal\\05597.png',
'test\\data\\test\\normal\\05598.png',
'test\\data\\test\\normal\\05599.png',
'test\\data\\test\\normal\\05600.png',
'test\\data\\test\\normal\\05601.png',
'test\\data\\test\\normal\\05602.png',
'test\\data\\test\\normal\\05603.png'], dtype='<U31')
print('Computing residuals')
imgs = []
for img_path in nat_dirlist_test:
imgs += [prnu.cut_ctr(np.asarray(Image.open(img_path)), (512, 512, 3))]
pool = Pool(cpu_count())
w_test = pool.map(prnu.extract_single, imgs)
pool.close()
w_test = np.stack(w_test, 0)
Computing residuals
predıctıon_values=[]
corr=[]
for i in range(0,299):
img_sample_cat = nat_dirlist_test[i]
basewidth = 512
img_sample_cat = Image.open(img_sample_cat)
wpercent = (basewidth/float(img_sample_cat.size[0]))
hsize = 512
img_sample_cat = img_sample_cat.resize((basewidth,hsize), Image.ANTIALIAS)
sample_res=prnu.extract_single(np.array(img_sample_cat)) #extract sıngle pıcture resıduals
sample_res = np.stack(sample_res, 0)
from numpy import zeros, newaxis
sample_res = sample_res[newaxis,:, :]
pred=pd.DataFrame(prnu.aligned_cc(c,sample_res)['ncc']) #ncc between test resıduals and normal pıctures fıngerprınts
if pred.values >0.005: #threshold
predıctıon_values.append(0)
corr.append(pred.values)
else:
predıctıon_values.append(1)
corr.append(pred.values)
predıctıon_values=pd.DataFrame(predıctıon_values)
predıctıon_values["label"]=1
predıctıon_values.columns=["Pred","label"]
predıctıon_values.head()
from sklearn.metrics import accuracy_score
print("Normal Accuracy Score:",accuracy_score(predıctıon_values.label,predıctıon_values.Pred))
Normal Accuracy Score: 0.9732441471571907
predıctıon_values=[]
corr=[]
for i in range(0,299):
img_sample_cat = stylegan_psi05_test[i]
basewidth = 512
img_sample_cat = Image.open(img_sample_cat)
wpercent = (basewidth/float(img_sample_cat.size[0]))
hsize = 512
img_sample_cat = img_sample_cat.resize((basewidth,hsize), Image.ANTIALIAS)
sample_res=prnu.extract_single(np.array(img_sample_cat)) #extract sıngle pıcture resıduals
sample_res = np.stack(sample_res, 0)
from numpy import zeros, newaxis
sample_res = sample_res[newaxis,:, :]
pred=pd.DataFrame(prnu.aligned_cc(sample_res,c)['ncc']) #ncc between test resıduals and normal pıctures fıngerprınts
if pred.values >0.005: #threshold
predıctıon_values.append(0)
corr.append(pred.values)
else:
predıctıon_values.append(1)
corr.append(pred.values)
predıctıon_values=pd.DataFrame(predıctıon_values)
predıctıon_values["label"]=1
predıctıon_values.columns=["Pred","label"]
predıctıon_values.head()
from sklearn.metrics import accuracy_score
print("Style GAN psi05 Accuracy Score:",accuracy_score(predıctıon_values.label,predıctıon_values.Pred))
Style GAN psi05 Accuracy Score: 0.9698996655518395
predıctıon_values=[]
corr=[]
for i in range(0,299):
img_sample_cat = stylegan_psi1_test[i]
basewidth = 512
img_sample_cat = Image.open(img_sample_cat)
wpercent = (basewidth/float(img_sample_cat.size[0]))
hsize = 512
img_sample_cat = img_sample_cat.resize((basewidth,hsize), Image.ANTIALIAS)
sample_res=prnu.extract_single(np.array(img_sample_cat)) #extract sıngle pıcture resıduals
sample_res = np.stack(sample_res, 0)
from numpy import zeros, newaxis
sample_res = sample_res[newaxis,:, :]
pred=pd.DataFrame(prnu.aligned_cc(sample_res,c)['ncc']) #ncc between test resıduals and normal pıctures fıngerprınts
if pred.values >0.005: #threshold
predıctıon_values.append(0)
corr.append(pred.values)
else:
predıctıon_values.append(1)
corr.append(pred.values)
predıctıon_values=pd.DataFrame(predıctıon_values)
predıctıon_values["label"]=1
predıctıon_values.columns=["Pred","label"]
predıctıon_values.head()
from sklearn.metrics import accuracy_score
print("Style GAN psi1 Accuracy Score:",accuracy_score(predıctıon_values.label,predıctıon_values.Pred))
Style GAN psi1 Accuracy Score: 0.9732441471571907
predıctıon_values=[]
corr=[]
for i in range(0,299):
img_sample_cat = stylegan2_psi05_test[i]
basewidth = 512
img_sample_cat = Image.open(img_sample_cat)
wpercent = (basewidth/float(img_sample_cat.size[0]))
hsize = 512
img_sample_cat = img_sample_cat.resize((basewidth,hsize), Image.ANTIALIAS)
sample_res=prnu.extract_single(np.array(img_sample_cat)) #extract sıngle pıcture resıduals
sample_res = np.stack(sample_res, 0)
from numpy import zeros, newaxis
sample_res = sample_res[newaxis,:, :]
pred=pd.DataFrame(prnu.aligned_cc(sample_res,c)['ncc']) #ncc between test resıduals and normal pıctures fıngerprınts
if pred.values >0.005: #threshold
predıctıon_values.append(0)
corr.append(pred.values)
else:
predıctıon_values.append(1)
corr.append(pred.values)
predıctıon_values=pd.DataFrame(predıctıon_values)
predıctıon_values["label"]=1
predıctıon_values.columns=["Pred","label"]
predıctıon_values.head()
from sklearn.metrics import accuracy_score
print("Style GAN2 psi05 Accuracy Score:",accuracy_score(predıctıon_values.label,predıctıon_values.Pred))
Style GAN2 psi05 Accuracy Score: 0.9531772575250836
predıctıon_values=[]
corr=[]
for i in range(0,299):
img_sample_cat = stylegan2_psi1_test[i]
basewidth = 512
img_sample_cat = Image.open(img_sample_cat)
wpercent = (basewidth/float(img_sample_cat.size[0]))
hsize = 512
img_sample_cat = img_sample_cat.resize((basewidth,hsize), Image.ANTIALIAS)
sample_res=prnu.extract_single(np.array(img_sample_cat)) #extract sıngle pıcture resıduals
sample_res = np.stack(sample_res, 0)
from numpy import zeros, newaxis
sample_res = sample_res[newaxis,:, :]
pred=pd.DataFrame(prnu.aligned_cc(sample_res,c)['ncc']) #ncc between test resıduals and normal pıctures fıngerprınts
if pred.values >0.005: #threshold
predıctıon_values.append(0)
corr.append(pred.values)
else:
predıctıon_values.append(1)
corr.append(pred.values)
predıctıon_values=pd.DataFrame(predıctıon_values)
predıctıon_values["label"]=1
predıctıon_values.columns=["Pred","label"]
predıctıon_values.head()
from sklearn.metrics import accuracy_score
print("Style GAN2 psi1 Accuracy Score:",accuracy_score(predıctıon_values.label,predıctıon_values.Pred))
Style GAN2 psi1 Accuracy Score: 0.9866220735785953